#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#ifdef CONFIG_BLOCK

#include <linux/sched.h>
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

struct request_list {
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int count[2];
	int starved[2];
	int elvpriv;
	mempool_t *rq_pool;
	wait_queue_head_t wait[2];
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.
	 */
	void *elevator_private;
	void *elevator_private2;
	void *elevator_private3;

	struct gendisk *rq_disk;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void*	data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
typedef void (unplug_fn) (struct request_queue *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	signed char		discard_zeroes_data;
};

struct request_queue
{
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	/*
	 * the queue request freelist, one for reads and one for writes
	 */
	struct request_list	rq;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	unplug_fn		*unplug_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Auto-unplugging state
	 */
	struct timer_list	unplug_timer;
	int			unplug_thresh;	/* After this many requests */
	unsigned long		unplug_delay;	/* After this many jiffies */
	struct work_struct	unplug_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	void			*dma_drain_buffer;
	unsigned int		dma_drain_size;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * reserved for flush operations
	 */
	unsigned int		ordered, next_ordered, ordseq;
	int			orderr, ordcolor;
	struct request		pre_flush_rq, bar_rq, post_flush_rq;
	struct request		*orig_bar_rq;

	struct mutex		sysfs_lock;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM  18	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline int queue_is_locked(struct request_queue *q)
{
#ifdef CONFIG_SMP
	spinlock_t *lock = q->queue_lock;
	return lock && spin_is_locked(lock);
#else
	return 1;
#endif
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	WARN_ON_ONCE(!queue_is_locked(q));
	__clear_bit(flag, &q->queue_flags);
}
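/*
 * Illustrative sketch only, not part of this header: queue_flag_set() and
 * queue_flag_clear() expect ->queue_lock to be held, as the
 * WARN_ON_ONCE(!queue_is_locked(q)) checks above suggest.  The helper below
 * is a hypothetical example of toggling a flag on a queue a driver owns.
 */
#if 0
static void example_disable_merges(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
#endif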
enum {
	/*
	 * Hardbarrier is supported with one of the following methods.
	 *
	 * NONE		: hardbarrier unsupported
	 * DRAIN	: ordering by draining is enough
	 * DRAIN_FLUSH	: ordering by draining w/ pre and post flushes
	 * DRAIN_FUA	: ordering by draining w/ pre flush and FUA write
	 * TAG		: ordering by tag is enough
	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
	 */
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_BY_TAG		= 0x02,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_NONE		= 0x00,

	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
					  QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_FUA,

	/*
	 * Ordered operation sequence
	 */
	QUEUE_ORDSEQ_STARTED	= 0x01,	/* flushing in progress */
	QUEUE_ORDSEQ_DRAIN	= 0x02,	/* waiting for the queue to be drained */
	QUEUE_ORDSEQ_PREFLUSH	= 0x04,	/* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= 0x08,	/* original barrier req in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= 0x10,	/* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= 0x20,
};

#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_flushing(q)	((q)->ordseq)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

#define blk_pm_request(rq)	\
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}
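/*
 * Illustrative sketch only, not part of this header: the blk_queue_*() test
 * macros above let callers probe a queue's capabilities before deciding how
 * to drive it.  The helper below is hypothetical and merely reports a few of
 * those capabilities.
 */
#if 0
static void example_report_queue_features(struct request_queue *q)
{
	if (blk_queue_discard(q))
		printk(KERN_INFO "queue supports DISCARD\n");
	if (blk_queue_secdiscard(q))
		printk(KERN_INFO "queue supports secure discard\n");
	if (blk_queue_nonrot(q))
		printk(KERN_INFO "queue is non-rotational (no seek penalty)\n");
}
#endif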
/*
 * We regard a request as sync if it is either a read or a sync write.
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static inline void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

static inline void blk_clear_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
}


/*
 * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
 * may it already have been started by the driver.
 */
#define RQ_NOMERGE_FLAGS	\
	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
	 (((rq)->cmd_flags & REQ_DISCARD) || \
	  (rq)->cmd_type == REQ_TYPE_FS))

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif
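/*
 * Illustrative sketch only, not part of this header: walking every bio_vec
 * of a request with rq_for_each_segment().  The example_copy_segment()
 * helper is an assumption made up for the sketch.
 */
#if 0
static void example_walk_request(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;

	rq_for_each_segment(bvec, rq, iter) {
		void *buf = kmap_atomic(bvec->bv_page, KM_USER0);

		/* bv_offset/bv_len describe the segment within the page */
		example_copy_segment(buf + bvec->bv_offset, bvec->bv_len);

		kunmap_atomic(buf, KM_USER0);
	}
}
#endif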
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_plug_device(struct request_queue *);
extern void blk_plug_device_unlocked(struct request_queue *);
extern int blk_remove_plug(struct request_queue *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

/*
 * A queue has just exited congestion.  Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion.  Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);
extern void blk_unplug(struct request_queue *q);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);
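/*
 * Illustrative sketch only, not part of this header: a minimal request_fn
 * for a hypothetical memory-backed driver, combining blk_fetch_request(),
 * blk_rq_pos()/blk_rq_cur_sectors() and __blk_end_request_cur() (declared
 * below among the completion functions).  example_transfer() and the use of
 * q->queuedata as the driver's private state are assumptions for the sketch.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
	/* Called with q->queue_lock held. */
	struct request *rq = blk_fetch_request(q);

	while (rq) {
		int err = -EIO;

		if (rq->cmd_type == REQ_TYPE_FS)
			err = example_transfer(q->queuedata, blk_rq_pos(rq),
					       blk_rq_cur_sectors(rq),
					       rq->buffer, rq_data_dir(rq));

		/* Returns true while parts of the request remain. */
		if (!__blk_end_request_cur(rq, err))
			rq = blk_fetch_request(q);
	}
}
#endif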
/*
 * Request completion related functions.
 *
 * blk_update_request() completes the given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);

/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
							   request_fn_proc *,
							   spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_queue_ordered(struct request_queue *, unsigned);
extern bool blk_do_ordered(struct request_queue *, struct request **);
extern unsigned blk_ordered_cur_seq(struct request_queue *);
extern unsigned blk_ordered_req_seq(struct request *);
extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
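/*
 * Illustrative sketch only, not part of this header: typical use of the
 * queue property helpers above from a driver's setup path.  The particular
 * limits chosen here are made-up example values, not recommendations.
 */
#if 0
static void example_init_queue_limits(struct request_queue *q)
{
	/* Tell the block layer how the device is addressed... */
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);

	/* ...what a single request may look like... */
	blk_queue_max_hw_sectors(q, 1024);
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 65536);

	/* ...and the preferred I/O granularity for the stack above. */
	blk_queue_io_min(q, 4096);
	blk_queue_io_opt(q, 8 * 4096);
}
#endif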
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern void generic_unplug_device(struct request_queue *);
extern long nr_blockdev_pages(void);

int blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * tag stuff
 */
#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

enum {
	BLKDEV_WAIT,	/* wait for completion */
	BLKDEV_BARRIER,	/* issue request with barrier */
	BLKDEV_SECURE,	/* secure discard */
};
#define BLKDEV_IFL_WAIT		(1 << BLKDEV_WAIT)
#define BLKDEV_IFL_BARRIER	(1 << BLKDEV_BARRIER)
#define BLKDEV_IFL_SECURE	(1 << BLKDEV_SECURE)
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
			unsigned long);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
static inline int sb_issue_discard(struct super_block *sb,
				   sector_t block, sector_t nr_blocks)
{
	block <<= (sb->s_blocksize_bits - 9);
	nr_blocks <<= (sb->s_blocksize_bits - 9);
	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
}

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
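/*
 * Illustrative sketch only, not part of this header: discarding a block
 * range on a device that advertises discard support, using
 * blkdev_issue_discard() and the BLKDEV_IFL_* flags above.  The helper name
 * is hypothetical.
 */
#if 0
static int example_discard_range(struct block_device *bdev,
				 sector_t start, sector_t nr_sects)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Wait for the discard to complete before returning. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT);
}
#endif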
enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, void *addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !((unsigned long)addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock().  A real patch is in progress
 * to fix this up; until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

#define blk_integrity_rq(rq)			(0)
#define blk_rq_count_integrity_sg(a)		(0)
#define blk_rq_map_integrity_sg(a, b)		(0)
#define bdev_get_integrity(a)			(0)
#define blk_get_integrity(a)			(0)
#define blk_integrity_compare(a, b)		(0)
#define blk_integrity_register(a, b)		(0)
#define blk_integrity_unregister(a)		do { } while (0)

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif