/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

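/*
 * A minimal usage sketch for bio_for_each_segment_all(), assuming the caller
 * owns the whole bio (e.g. completing a bio it allocated itself); the
 * process_page() helper is hypothetical:
 *
 *	struct bio_vec *bvec;
 *	struct bvec_iter_all iter_all;
 *
 *	bio_for_each_segment_all(bvec, bio, iter_all)
 *		process_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
 */
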
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less than or equal to bvec[iter->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

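/*
 * A minimal sketch of partial completion built on bio_advance(), assuming the
 * caller knows @done bytes have finished (hypothetical driver code, not part
 * of this header):
 *
 *	static void my_complete_bytes(struct bio *bio, unsigned int done)
 *	{
 *		bio_advance(bio, done);
 *		if (!bio->bi_iter.bi_size)
 *			bio_endio(bio);
 *	}
 */
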
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

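/*
 * A minimal usage sketch for bio_for_each_segment(); it walks the unfinished
 * part of the bio as single-page bio_vecs without modifying bio->bi_iter
 * (process_segment() is a hypothetical helper):
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		process_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */
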
#define __bio_for_each_bvec(bvl, bio, iter, start)		\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)			\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

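/*
 * A minimal usage sketch for bio_for_each_bvec(); unlike
 * bio_for_each_segment(), each bio_vec may span several contiguous pages,
 * which suits DMA-mapping style loops (map_buffer() is hypothetical):
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_bvec(bvec, bio, iter)
 *		map_buffer(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 */
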
/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/secure erase/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
	return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	if (unlikely(i >= bio->bi_vcnt)) {
		fi->folio = NULL;
		return;
	}

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else {
		bio_first_folio(fi, bio, fi->_i + 1);
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))

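/*
 * A minimal usage sketch for bio_for_each_folio_all(), e.g. in a read
 * completion path; the folio_mark_uptodate() call stands in for whatever the
 * caller does per folio:
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 */
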
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
	BIP_INTEGRITY_USER	= 1 << 5, /* Integrity payload is user address */
	BIP_COPY_USER		= 1 << 6, /* Kernel bounce buffer in use */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

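/*
 * A minimal sketch of consuming a bio in fixed-size chunks with
 * bio_next_split(); split_set is a hypothetical driver-owned bio_set,
 * chunk_sectors a hypothetical limit and issue_chunk() a hypothetical helper:
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, chunk_sectors, GFP_NOIO,
 *				       split_set);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		issue_chunk(split);
 *	} while (split != bio);
 */
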
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

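/*
 * A minimal sketch of setting up a private bio_set, as a driver that clones
 * or splits bios typically does; my_bio_set is the caller's, and the pool
 * size and flags are illustrative only:
 *
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 *	if (ret)
 *		return ret;
 *	...
 *	bioset_exit(&my_bio_set);
 */
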
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

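/*
 * A minimal sketch of allocating, filling and submitting a one-page read
 * synchronously; bdev, page and sector are assumed to be set up by the
 * caller:
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */
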
void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}

/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec, it is going to be
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

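/*
 * A minimal sketch of how a direct-I/O path might size its bio from an
 * iov_iter using the helper above; nr_pages may legitimately be 0 for a
 * bvec-backed iterator, and bdev is the caller's:
 *
 *	struct bio *bio;
 *	unsigned int nr_pages;
 *	int ret;
 *
 *	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
 *	bio = bio_alloc(bdev, nr_pages, REQ_OP_WRITE, GFP_KERNEL);
 *	ret = bio_iov_iter_get_pages(bio, iter);
 */
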
struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
			      unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
				size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

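/*
 * A minimal sketch of deferring bios on a bio_list and draining it later,
 * using the helpers defined below; requeue_list is hypothetical driver state:
 *
 *	struct bio_list requeue_list = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&requeue_list, bio);
 *	...
 *	while ((bio = bio_list_pop(&requeue_list)))
 *		submit_bio_noacct(bio);
 */
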
static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					unsigned int len, unsigned int offset)
{
	return 0;
}

static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
					 ssize_t len, u32 seed)
{
	return -EINVAL;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}

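/*
 * A minimal sketch of wiring up polled I/O for a direct-I/O kiocb; the caller
 * later reaps the completion itself, typically via bio_poll() from blkdev.h:
 *
 *	if (kiocb->ki_flags & IOCB_HIPRI)
 *		bio_set_polled(bio, kiocb);
 *	submit_bio(bio);
 */
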
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */