/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/major.h>
#include <linux/msdos_fs.h>	/* for SECTOR_* */
#include <linux/bio.h>

#ifdef HAVE_BLK_MQ
#include <linux/blk-mq.h>
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_flag_set(flag, q);
}
#endif

#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_flag_clear(flag, q);
}
#endif

/*
 * 4.7 API,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface.  However, the new interface is GPL-only, so we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface.  However, while the old interface was available to all callers,
 * the new one is GPL-only.  Thus if the GPL-only version is detected we
 * implement our own trivial helper.
 */
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	if (wc)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
	blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	if (wc)
		q->flush_flags |= REQ_FLUSH;
	if (fua)
		q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
	blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}
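
/*
 * Illustrative usage sketch: a virtual block device that honors cache
 * flushes and FUA writes would typically call this helper while configuring
 * its request queue.  The queue pointer "q" here is hypothetical.
 *
 *	blk_queue_set_write_cache(q, B_TRUE, B_TRUE);
 */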

/*
 * Detect if a device has a write cache. Used to set the initial value for
 * the vdev nowritecache flag.
 *
 * 4.10: QUEUE_FLAG_WC added. Initialised by the driver, but can be changed
 *       later by the operator. If not set, the kernel will return flush
 *       requests immediately without doing anything.
 * 6.6: QUEUE_FLAG_HW_WC added. Initialised by the driver, can't be changed.
 *      Only controls whether the operator is allowed to change _WC. The
 *      initial version was buggy; it aliased QUEUE_FLAG_FUA, so it is
 *      unusable.
 * 6.6.10, 6.7: QUEUE_FLAG_HW_WC fixed.
 *
 * Older than 4.10 we just assume a write cache, and let the normal flush
 * failure detection apply.
 */
static inline boolean_t
zfs_bdev_has_write_cache(struct block_device *bdev)
{
#if defined(QUEUE_FLAG_HW_WC) && QUEUE_FLAG_HW_WC != QUEUE_FLAG_FUA
	return (test_bit(QUEUE_FLAG_HW_WC, &bdev_get_queue(bdev)->queue_flags));
#elif defined(QUEUE_FLAG_WC)
	return (test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags));
#else
	return (B_TRUE);
#endif
}
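
/*
 * Illustrative usage sketch: a caller seeding the vdev nowritecache flag
 * from the detected queue state.  The vdev pointer "v" is hypothetical.
 *
 *	if (!zfs_bdev_has_write_cache(bdev))
 *		v->vdev_nowritecache = B_TRUE;
 */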

static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
	!defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
	q->backing_dev_info->ra_pages = ra_pages;
#else
	q->backing_dev_info.ra_pages = ra_pages;
#endif
#endif
}

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
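
/*
 * Illustrative usage sketch: iterating a bio's segments through the
 * version-neutral wrapper above.  Callers declare both the by-value and
 * by-pointer bio_vec so the same code builds against either iterator API;
 * the variable names are hypothetical.
 *
 *	struct bio_vec bv, *bvp = &bv;
 *	bvec_iterator_t iter;
 *
 *	bio_for_each_segment4(bv, bvp, bio, iter) {
 *		... access bvp->bv_page, bvp->bv_len, bvp->bv_offset ...
 *	}
 */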

static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags, bool dev,
    bool transport, bool driver)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

	if (dev)
		*flags |= REQ_FAILFAST_DEV;
	if (transport)
		*flags |= REQ_FAILFAST_TRANSPORT;
	if (driver)
		*flags |= REQ_FAILFAST_DRIVER;
}

/*
 * Maximum disk name length; it may be undefined for some kernels.
 */
#if !defined(DISK_NAME_LEN)
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
	switch (status) {
	case BLK_STS_OK:
		return (0);
	case BLK_STS_NOTSUPP:
		return (EOPNOTSUPP);
	case BLK_STS_TIMEOUT:
		return (ETIMEDOUT);
	case BLK_STS_NOSPC:
		return (ENOSPC);
	case BLK_STS_TRANSPORT:
		return (ENOLINK);
	case BLK_STS_TARGET:
		return (EREMOTEIO);
#ifdef HAVE_BLK_STS_RESV_CONFLICT
	case BLK_STS_RESV_CONFLICT:
#else
	case BLK_STS_NEXUS:
#endif
		return (EBADE);
	case BLK_STS_MEDIUM:
		return (ENODATA);
	case BLK_STS_PROTECTION:
		return (EILSEQ);
	case BLK_STS_RESOURCE:
		return (ENOMEM);
	case BLK_STS_AGAIN:
		return (EAGAIN);
	case BLK_STS_IOERR:
		return (EIO);
	default:
		return (EIO);
	}
}

static inline blk_status_t
errno_to_bi_status(int error)
{
	switch (error) {
	case 0:
		return (BLK_STS_OK);
	case EOPNOTSUPP:
		return (BLK_STS_NOTSUPP);
	case ETIMEDOUT:
		return (BLK_STS_TIMEOUT);
	case ENOSPC:
		return (BLK_STS_NOSPC);
	case ENOLINK:
		return (BLK_STS_TRANSPORT);
	case EREMOTEIO:
		return (BLK_STS_TARGET);
	case EBADE:
#ifdef HAVE_BLK_STS_RESV_CONFLICT
		return (BLK_STS_RESV_CONFLICT);
#else
		return (BLK_STS_NEXUS);
#endif
	case ENODATA:
		return (BLK_STS_MEDIUM);
	case EILSEQ:
		return (BLK_STS_PROTECTION);
	case ENOMEM:
		return (BLK_STS_RESOURCE);
	case EAGAIN:
		return (BLK_STS_AGAIN);
	case EIO:
		return (BLK_STS_IOERR);
	default:
		return (BLK_STS_IOERR);
	}
}
#endif /* HAVE_BIO_BI_STATUS */

/*
 * 4.3 API change
 * The bio_endio() prototype changed slightly.  These are helper
 * macros to ensure the prototype and invocation are handled.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define	BIO_END_IO_ERROR(bio)		bi_status_to_errno(bio->bi_status)
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_status = errno_to_bi_status(-error);
	bio_endio(bio);
}
#else
#define	BIO_END_IO_ERROR(bio)		(-(bio->bi_error))
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
	ASSERT3S(error, <=, 0);
	bio->bi_error = error;
	bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */

#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
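
/*
 * Illustrative usage sketch: a bio completion callback and a matching
 * completion call written against these wrappers look the same regardless
 * of the underlying bio_endio() prototype.  The callback name is
 * hypothetical.
 *
 *	BIO_END_IO_PROTO(my_bio_done, bio, error)
 *	{
 *		... per-I/O completion handling ...
 *	}
 *
 *	Completing a bio with an error:
 *
 *	BIO_END_IO(bio, -EIO);
 */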

/*
 * 5.15 MACRO,
 *   GD_DEAD
 *
 * 2.6.36 - 5.14 MACRO,
 *   GENHD_FL_UP
 *
 * Check the disk status and return B_TRUE if alive,
 * otherwise B_FALSE.
 */
static inline boolean_t
zfs_check_disk_status(struct block_device *bdev)
{
#if defined(GENHD_FL_UP)
	return (!!(bdev->bd_disk->flags & GENHD_FL_UP));
#elif defined(GD_DEAD)
	return (!test_bit(GD_DEAD, &bdev->bd_disk->state));
#else
/*
 * This is encountered if neither GENHD_FL_UP nor GD_DEAD is available in
 * the kernel - likely due to a macro change that needs to be chased down.
 */
#error "Unsupported kernel: no usable disk status check"
#endif
}

/*
 * 4.1 API,
 * 3.10.0 CentOS 7.x API,
 *   blkdev_reread_part()
 *
 * For older kernels trigger a re-reading of the partition table by calling
 * check_disk_change() which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change is used, in favor of
 * check_disk_change(), with the modification that invalidation is no longer
 * forced.
 */
#ifdef HAVE_CHECK_DISK_CHANGE
#define	zfs_check_media_change(bdev)	check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define	vdev_bdev_reread_part(bdev)	blkdev_reread_part(bdev)
#else
#define	vdev_bdev_reread_part(bdev)	check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	struct gendisk *gd = bdev->bd_disk;
	const struct block_device_operations *bdo = gd->fops;
#endif

	if (!bdev_check_media_change(bdev))
		return (0);

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	/*
	 * Force revalidation, to mimic the old behavior of
	 * check_disk_change()
	 */
	if (bdo->revalidate_disk)
		bdo->revalidate_disk(gd);
#endif

	return (0);
}
#define	vdev_bdev_reread_part(bdev)	zfs_check_media_change(bdev)
#elif defined(HAVE_DISK_CHECK_MEDIA_CHANGE)
#define	vdev_bdev_reread_part(bdev)	disk_check_media_change(bdev->bd_disk)
#define	zfs_check_media_change(bdev)	disk_check_media_change(bdev->bd_disk)
#else
/*
 * This is encountered if check_disk_change() and bdev_check_media_change()
 * are not available in the kernel - likely due to an API change that needs
 * to be chased down.
 */
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */

/*
 * 2.6.27 API change
 * The function was exported for use; prior to this it existed but the
 * symbol was not exported.
 *
 * 4.4.0-6.21 API change for Ubuntu
 * lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
 *
 * 5.11 API change
 * Changed to take a dev_t argument which is set on success and to return a
 * non-zero error code on failure.
 */
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
	return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
	struct block_device *bdev = lookup_bdev(path, FMODE_READ);
	if (IS_ERR(bdev))
		return (PTR_ERR(bdev));

	*dev = bdev->bd_dev;
	bdput(bdev);

	return (0);
#else
#error "Unsupported kernel"
#endif
}
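
/*
 * Illustrative usage sketch: resolving a device path to a dev_t with the
 * wrapper above.  The variable names are hypothetical; on failure a
 * negative errno is returned.
 *
 *	dev_t dev;
 *	int error = vdev_lookup_bdev("/dev/sda", &dev);
 *	if (error != 0)
 *		... handle the lookup failure ...
 */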

#if defined(HAVE_BLK_MODE_T)
#define	blk_mode_is_open_write(flag)	((flag) & BLK_OPEN_WRITE)
#else
#define	blk_mode_is_open_write(flag)	((flag) & FMODE_WRITE)
#endif

/*
 * Kernels without bio_set_op_attrs use bi_rw for the bio flags.
 */
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
#if defined(HAVE_BIO_BI_OPF)
	bio->bi_opf = rw | flags;
#else
	bio->bi_rw |= rw | flags;
#endif /* HAVE_BIO_BI_OPF */
}
#endif

/*
 * bio_set_flush - Set the appropriate flags in a bio to guarantee
 * data are on non-volatile media on completion.
 *
 * 2.6.37 - 4.8 API,
 *   Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
 *   replacement for WRITE_BARRIER to allow expressing richer semantics
 *   to the block layer.  It's up to the block layer to implement the
 *   semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
 *
 * 4.8 - 4.9 API,
 *   REQ_FLUSH was renamed to REQ_PREFLUSH.  For consistency with previous
 *   OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
 *
 * 4.10 API,
 *   The read/write flags and their modifiers, including WRITE_FLUSH,
 *   WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
 *   torvalds/linux@70fd7614 and replaced by direct flag modification
 *   of the REQ_ flags in bio->bi_opf.  Use REQ_PREFLUSH.
 */
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH)	/* >= 4.10 */
	bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
#elif defined(WRITE_FLUSH_FUA)	/* >= 2.6.37 and <= 4.9 */
	bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error	"Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}
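
/*
 * Illustrative usage sketch: marking an empty write bio as a cache flush
 * before submitting it.  Allocation and submission details vary by kernel
 * version, so only the relevant call is shown; "bio" is a hypothetical,
 * already-allocated bio carrying no data.
 *
 *	bio_set_flush(bio);
 *	... submit the bio via the appropriate submit_bio() variant ...
 */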

/*
 * 4.8 API,
 *   REQ_OP_FLUSH
 *
 * 4.8-rc0 - 4.8-rc1,
 *   REQ_PREFLUSH
 *
 * 2.6.36 - 4.7 API,
 *   REQ_FLUSH
 *
 * Used to determine if a cache flush has been requested.  This check is
 * intentionally broad so that it covers both the legacy flush and the newer
 * preflush behavior.  This is correct in all cases but may have a performance
 * impact for some kernels.  It has the advantage of minimizing kernel
 * specific changes in the zvol code.
 */
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
	return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
	return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
	return (bio->bi_rw & REQ_FLUSH);
#else
#error	"Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_FUA flag moved to bio->bi_opf
 *
 * 2.6.x - 4.7 API,
 *   REQ_FUA
 */
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
	return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
	return (bio->bi_rw & REQ_FUA);
#else
#error	"Allowing the build will cause fua requests to be ignored."
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_DISCARD
 *
 * 2.6.36 - 4.7 API,
 *   REQ_DISCARD
 *
 * In all cases the normal I/O path is used for discards.  The only
 * difference is how the kernel tags individual I/Os as discards.
 */
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
	return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
	return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 4.8 API,
 *   REQ_OP_SECURE_ERASE
 *
 * 2.6.36 - 4.7 API,
 *   REQ_SECURE
 */
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
	return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
	return (bio->bi_rw & REQ_SECURE);
#else
	return (0);
#endif
}

/*
 * 2.6.33 API change
 * Discard granularity and alignment restrictions may now be set.  For
 * older kernels which do not support this, it is safe to skip setting it.
 */
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}

/*
 * 5.19 API,
 *   bdev_max_discard_sectors()
 *
 * 2.6.32 API,
 *   blk_queue_discard()
 */
static inline boolean_t
bdev_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_DISCARD_SECTORS)
	return (bdev_max_discard_sectors(bdev) > 0 &&
	    bdev_discard_granularity(bdev) > 0);
#elif defined(HAVE_BLK_QUEUE_DISCARD)
	return (blk_queue_discard(bdev_get_queue(bdev)) > 0 &&
	    bdev_get_queue(bdev)->limits.discard_granularity > 0);
#else
#error "Unsupported kernel"
#endif
}

/*
 * 5.19 API,
 *   bdev_max_secure_erase_sectors()
 *
 * 4.8 API,
 *   blk_queue_secure_erase()
 *
 * 2.6.36 - 4.7 API,
 *   blk_queue_secdiscard()
 */
static inline boolean_t
bdev_secure_discard_supported(struct block_device *bdev)
{
#if defined(HAVE_BDEV_MAX_SECURE_ERASE_SECTORS)
	return (!!bdev_max_secure_erase_sectors(bdev));
#elif defined(HAVE_BLK_QUEUE_SECURE_ERASE)
	return (!!blk_queue_secure_erase(bdev_get_queue(bdev)));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
	return (!!blk_queue_secdiscard(bdev_get_queue(bdev)));
#else
#error "Unsupported kernel"
#endif
}

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times.  Other kernel callers and
 * user space processes which don't pass this value will get EBUSY.  This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)
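
/*
 * Illustrative usage sketch: passing VDEV_HOLDER as the exclusive-open
 * holder when a vdev opens its backing device.  The exact open interface
 * (blkdev_get_by_path() and its later replacements) and its other arguments
 * vary by kernel, so only the holder argument matters here; "path" and
 * "mode" are hypothetical.
 *
 *	bdev = blkdev_get_by_path(path, mode, VDEV_HOLDER, ...);
 */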

static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	return (bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
	    jiffies));
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	return (bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),
	    bio_op(bio), jiffies));
#elif defined(HAVE_DISK_IO_ACCT)
	return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
	return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	unsigned long start_time = jiffies;
	generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
	return (start_time);
#else
	/* Unsupported */
	return (0);
#endif
}

static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
    struct gendisk *disk __attribute__((unused)),
    int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_BDEV_IO_ACCT_63)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
	    start_time);
#elif defined(HAVE_BDEV_IO_ACCT_OLD)
	bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
#elif defined(HAVE_DISK_IO_ACCT)
	disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
	bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
	generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
	generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
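
/*
 * Illustrative usage sketch: the two helpers above are used as a matched
 * pair around processing a bio; the variable names are hypothetical.
 *
 *	unsigned long start = blk_generic_start_io_acct(q, disk, rw, bio);
 *	... perform the I/O ...
 *	blk_generic_end_io_acct(q, disk, rw, bio, start);
 */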

#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
	return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
	return (blk_alloc_queue_rh(make_request, node_id));
#else
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
	if (q != NULL)
		blk_queue_make_request(q, make_request);

	return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

/*
 * All the io_*() helper functions below can operate on a bio or a request
 * (rq), but not both.  The older submit_bio() codepath will pass a bio, and
 * the newer blk-mq codepath will pass a request.
 */
static inline int
io_data_dir(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL) {
		if (op_is_write(req_op(rq))) {
			return (WRITE);
		} else {
			return (READ);
		}
	}
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_data_dir(bio));
}

static inline int
io_is_flush(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_FLUSH);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_flush(bio));
}

static inline int
io_is_discard(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_DISCARD);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_discard(bio));
}

static inline int
io_is_secure_erase(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (req_op(rq) == REQ_OP_SECURE_ERASE);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_secure_erase(bio));
}

static inline int
io_is_fua(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (rq->cmd_flags & REQ_FUA);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_is_fua(bio));
}

static inline uint64_t
io_offset(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_pos(rq) << 9);
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SECTOR(bio) << 9);
}

static inline uint64_t
io_size(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (blk_rq_bytes(rq));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (BIO_BI_SIZE(bio));
}

static inline int
io_has_data(struct bio *bio, struct request *rq)
{
#ifdef HAVE_BLK_MQ
	if (rq != NULL)
		return (bio_has_data(rq->bio));
#else
	ASSERT3P(rq, ==, NULL);
#endif
	return (bio_has_data(bio));
}
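
/*
 * Illustrative usage sketch: a single zvol I/O path can service either
 * source by passing whichever of bio/rq is non-NULL to the helpers above;
 * the variable names are hypothetical.
 *
 *	uint64_t offset = io_offset(bio, rq);
 *	uint64_t size = io_size(bio, rq);
 *
 *	if (io_is_flush(bio, rq))
 *		... handle a cache flush ...
 *	else if (io_is_discard(bio, rq))
 *		... handle a discard ...
 */
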
#endif /* _ZFS_BLKDEV_H */