/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * grace period before allowing idle class to get disk access
 */
#define CFQ_IDLE_GRACE		(HZ / 10)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)

#define RQ_CIC(rq)	((struct cfq_io_context *)(rq)->elevator_private)
#define RQ_CFQQ(rq)	((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
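/*
 * Illustration (not part of the scheduler): with the cached leftmost
 * pointer, min extraction is O(1) in the common case. A hypothetical
 * caller would do:
 *
 *	struct cfq_rb_root st = CFQ_RB_ROOT;
 *	...
 *	struct rb_node *n = cfq_rb_first(&st);	// cached, no tree walk
 *	if (n)
 *		cfq_rb_erase(n, &st);		// invalidates the cache
 *
 * cfq_rb_first() (defined further down) repopulates st.left via
 * rb_first() only when the cache was invalidated by an erase of the
 * leftmost node.
 */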
/*
 * Per block device queue structure
 */
struct cfq_data {
        request_queue_t *queue;

        /*
         * rr list of queues with requests and the count of them
         */
        struct cfq_rb_root service_tree;
        unsigned int busy_queues;

        int rq_in_driver;
        int sync_flight;
        int hw_tag;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;

        struct timer_list idle_class_timer;

        sector_t last_position;
        unsigned long last_end_request;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;

        struct list_head cic_list;

        sector_t new_seek_mean;
        u64 new_seek_total;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        atomic_t ref;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* pending metadata requests */
        int meta_pending;
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        unsigned long slice_end;
        long slice_resid;

        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this group */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class, org_ioprio_class;

        /* various state flags, see below */
        unsigned int flags;

        sector_t last_request_pos;
};

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
        CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
        CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};

#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
        cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
        cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
        return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS
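/*
 * For reference, CFQ_CFQQ_FNS(on_rr) above expands to (roughly):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 *
 * i.e. one mark/clear/test helper triple per state flag.
 */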
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
                                       struct task_struct *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
                                                struct io_context *);

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
                                            int is_sync)
{
        return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
                                struct cfq_queue *cfqq, int is_sync)
{
        cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
        if (bio_data_dir(bio) == READ || bio_sync(bio))
                return 1;

        return 0;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues)
                kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        return !cfqd->busy_queues;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
                                 unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return 0;
        if (time_before(jiffies, cfqq->slice_end))
                return 0;

        return 1;
}
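/*
 * Worked example of the slice scaling above (an illustration only,
 * assuming HZ == 1000, so cfq_slice[SYNC] == HZ/10 == 100 jiffies):
 *
 *	prio 0 (highest): 100 + (100/5) * (4 - 0) = 180ms
 *	prio 4 (default): 100 + (100/5) * (4 - 4) = 100ms
 *	prio 7 (lowest):  100 + (100/5) * (4 - 7) =  40ms
 *
 * so each ioprio step is worth base_slice/5 of slice length, centred on
 * the default priority 4.
 */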
/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
        sector_t last, s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01	/* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02	/* request 2 wraps */
        unsigned wrap = 0;	/* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) && !rq_is_sync(rq2))
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
        if (rq_is_meta(rq1) && !rq_is_meta(rq2))
                return rq1;
        else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                return rq2;

        s1 = rq1->sector;
        s2 = rq2->sector;

        last = cfqd->last_position;

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}

/*
 * The below is the leftmost-cache rbtree addon
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        return root->left;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;

        rb_erase(n, &root->rb);
        RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
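/*
 * A small worked example of cfq_choose_req() (illustrative numbers):
 * with last_position == 1000 and the default cfq_back_penalty == 2,
 *
 *	rq1 at sector 1100: forward, d1 = 1100 - 1000 = 100
 *	rq2 at sector  980: 20 sectors behind, within back_max,
 *			    so d2 = (1000 - 980) * 2 = 40
 *
 * rq2 wins despite being behind the head, because the penalized
 * backward distance is still shorter than the forward one. A request
 * more than back_max sectors behind would instead be flagged as
 * "wrapped" and only chosen if the alternative also wraps.
 */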
/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
                                 struct cfq_queue *cfqq, int add_front)
{
        struct rb_node **p = &cfqd->service_tree.rb.rb_node;
        struct rb_node *parent = NULL;
        unsigned long rb_key;
        int left;

        if (!add_front) {
                rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
                rb_key += cfqq->slice_resid;
                cfqq->slice_resid = 0;
        } else
                rb_key = 0;

        if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                /*
                 * same position, nothing more to do
                 */
                if (rb_key == cfqq->rb_key)
                        return;

                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
        }

        left = 1;
        while (*p) {
                struct cfq_queue *__cfqq;
                struct rb_node **n;

                parent = *p;
                __cfqq = rb_entry(parent, struct cfq_queue, rb_node);

                /*
                 * sort RT queues first, we always want to give
                 * preference to them. IDLE queues go to the back.
                 * after that, sort on the next service time.
                 */
                if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
                        n = &(*p)->rb_left;
                else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
                        n = &(*p)->rb_right;
                else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
                        n = &(*p)->rb_left;
                else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                        n = &(*p)->rb_right;
                else if (rb_key < __cfqq->rb_key)
                        n = &(*p)->rb_left;
                else
                        n = &(*p)->rb_right;

                if (n == &(*p)->rb_right)
                        left = 0;

                p = n;
        }

        if (left)
                cfqd->service_tree.left = &cfqq->rb_node;

        cfqq->rb_key = rb_key;
        rb_link_node(&cfqq->rb_node, parent, p);
        rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        /*
         * Resorting requires the cfqq to be on the RR list already.
         */
        if (cfq_cfqq_on_rr(cfqq))
                cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;

        cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        BUG_ON(!cfq_cfqq_on_rr(cfqq));
        cfq_clear_cfqq_on_rr(cfqq);

        if (!RB_EMPTY_NODE(&cfqq->rb_node))
                cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);

        BUG_ON(!cfqq->queued[sync]);
        cfqq->queued[sync]--;

        elv_rb_del(&cfqq->sort_list, rq);

        if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
                cfq_del_cfqq_rr(cfqd, cfqq);
}
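/*
 * Worked example of the rb_key computed above (illustrative, HZ == 1000,
 * sync base slice 100ms): with three busy queues, a sync prio 4 queue gets
 *
 *	cfq_slice_offset() = (3 - 1) * (180 - 100) = 160
 *	rb_key = 160 + jiffies (+ any leftover slice_resid)
 *
 * while a sync prio 0 queue at the same instant gets offset 0, i.e. a
 * smaller key and therefore earlier service. add_front (used by
 * preemption) forces rb_key = 0, which sorts the queue leftmost within
 * its class.
 */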
static void cfq_add_rq_rb(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *__alias;

        cfqq->queued[rq_is_sync(rq)]++;

        /*
         * looks a little odd, but the first insert might return an alias.
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
                cfq_dispatch_insert(cfqd->queue, __alias);

        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq);

        /*
         * check if this request is a better next-serve candidate
         */
        cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
        BUG_ON(!cfqq->next_rq);
}

static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
        elv_rb_del(&cfqq->sort_list, rq);
        cfqq->queued[rq_is_sync(rq)]--;
        cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
        if (!cic)
                return NULL;

        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);

                return elv_rb_find(&cfqq->sort_list, sector);
        }

        return NULL;
}

static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        cfqd->rq_in_driver++;

        /*
         * If the depth is larger than 1, it really could be queueing. But
         * lets make the mark a little higher - idling could still be good
         * for low queueing, and a low queueing number could also just
         * indicate a SCSI mid layer like behaviour where limit+1 is often
         * seen.
         */
        if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
                cfqd->hw_tag = 1;

        cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;

        WARN_ON(!cfqd->rq_in_driver);
        cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq->next_rq == rq)
                cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

        list_del_init(&rq->queuelist);
        cfq_del_rq_rb(rq);

        if (rq_is_meta(rq)) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
        }
}

static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *__rq;

        __rq = cfq_find_rq_fmerge(cfqd, bio);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_FRONT_MERGE;
        }

        return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(request_queue_t *q, struct request *req,
                               int type)
{
        if (type == ELEVATOR_FRONT_MERGE) {
                struct cfq_queue *cfqq = RQ_CFQQ(req);

                cfq_reposition_rq_rb(cfqq, req);
        }
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
                    struct request *next)
{
        /*
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
            time_before(next->start_time, rq->start_time))
                list_move(&rq->queuelist, &next->queuelist);

        cfq_remove_request(next);
}
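/*
 * Illustration of the front-merge lookup above: a bio covering sectors
 * [100, 108) ends at sector 108, so cfq_find_rq_fmerge() does
 *
 *	elv_rb_find(&cfqq->sort_list, 108);
 *
 * which returns a queued request starting at sector 108, if one exists.
 * The bio can then be glued to its front (ELEVATOR_FRONT_MERGE), after
 * which cfq_merged_request() re-sorts the grown request in the rbtree,
 * since its start sector (the rbtree key) changed.
 */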
static int cfq_allow_merge(request_queue_t *q, struct request *rq,
                           struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        /*
         * Disallow merge of a sync bio into an async request.
         */
        if (cfq_bio_sync(bio) && !rq_is_sync(rq))
                return 0;

        /*
         * Lookup the cfqq that this bio will be queued with. Allow
         * merge only if rq is queued there.
         */
        cic = cfq_cic_rb_lookup(cfqd, current->io_context);
        if (!cic)
                return 0;

        cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
        if (cfqq == RQ_CFQQ(rq))
                return 1;

        return 0;
}

static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        if (cfqq) {
                /*
                 * stop potential idle class queues waiting service
                 */
                del_timer(&cfqd->idle_class_timer);

                cfqq->slice_end = 0;
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
                cfq_clear_cfqq_queue_new(cfqq);
        }

        cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                    int timed_out)
{
        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);

        cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);

        /*
         * store what was left of this slice, if the queue idled/timed out
         */
        if (timed_out && !cfq_cfqq_slice_new(cfqq))
                cfqq->slice_resid = cfqq->slice_end - jiffies;

        cfq_resort_rr_list(cfqd, cfqq);

        if (cfqq == cfqd->active_queue)
                cfqd->active_queue = NULL;

        if (cfqd->active_cic) {
                put_io_context(cfqd->active_cic->ioc);
                cfqd->active_cic = NULL;
        }
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
        struct cfq_queue *cfqq = cfqd->active_queue;

        if (cfqq)
                __cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;
        struct rb_node *n;

        if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
                return NULL;

        n = cfq_rb_first(&cfqd->service_tree);
        cfqq = rb_entry(n, struct cfq_queue, rb_node);

        if (cfq_class_idle(cfqq)) {
                unsigned long end;

                /*
                 * if we have idle queues and no rt or be queues had
                 * pending requests, either allow immediate service if
                 * the grace period has passed or arm the idle grace
                 * timer
                 */
                end = cfqd->last_end_request + CFQ_IDLE_GRACE;
                if (time_before(jiffies, end)) {
                        mod_timer(&cfqd->idle_class_timer, end);
                        cfqq = NULL;
                }
        }

        return cfqq;
}
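/*
 * Grace-period illustration (assuming HZ == 1000): if an RT/BE request
 * completed at jiffies == 5000, an idle-class queue is not served before
 * jiffies == 5000 + CFQ_IDLE_GRACE == 5100, i.e. 100ms of quiet time must
 * pass first; until then the idle_class_timer is armed to re-check at
 * that moment.
 */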
/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;

        cfqq = cfq_get_next_queue(cfqd);
        __cfq_set_active_queue(cfqd, cfqq);
        return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                          struct request *rq)
{
        if (rq->sector >= cfqd->last_position)
                return rq->sector - cfqd->last_position;
        else
                return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
        struct cfq_io_context *cic = cfqd->active_cic;

        if (!sample_valid(cic->seek_samples))
                return 0;

        return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
                                struct cfq_queue *cfqq)
{
        /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
         */
        return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq = cfqd->active_queue;
        struct cfq_io_context *cic;
        unsigned long sl;

        WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
        WARN_ON(cfq_cfqq_slice_new(cfqq));

        /*
         * idle is disabled, either manually or by past process history
         */
        if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
                return;

        /*
         * task has exited, don't wait
         */
        cic = cfqd->active_cic;
        if (!cic || !cic->ioc->task)
                return;

        /*
         * See if this prio level has a good candidate
         */
        if (cfq_close_cooperator(cfqd, cfqq) &&
            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                return;

        cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);

        /*
         * we don't want to idle for seeks, but we do want to allow
         * fair distribution of slice time for a process doing back-to-back
         * seeks. so allow a little bit of time for it to submit a new rq
         */
        sl = cfqd->cfq_slice_idle;
        if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
                sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

        mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}
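/*
 * Worked example of the idle window length (illustrative, HZ == 1000,
 * cfq_slice_idle == HZ/125 == 8 jiffies): a non-seeky sync queue that
 * just went empty gets an 8ms window in which to submit its next request
 * before the slice is given up. A seeky process (seek_mean above the
 * CIC_SEEKY threshold) only gets
 *
 *	sl = min(8, msecs_to_jiffies(CFQ_MIN_TT)) = 2 jiffies (2ms)
 *
 * enough to keep slice accounting fair without stalling the disk for a
 * workload that will seek away anyway.
 */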
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        cfq_remove_request(rq);
        cfqq->dispatched++;
        elv_dispatch_sort(q, rq);

        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq;
        int fifo;

        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;

        cfq_mark_cfqq_fifo_expire(cfqq);

        if (list_empty(&cfqq->fifo))
                return NULL;

        fifo = cfq_cfqq_sync(cfqq);
        rq = rq_entry_fifo(cfqq->fifo.next);

        if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
                return NULL;

        return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        const int base_rq = cfqd->cfq_slice_async_rq;

        WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

        return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;

        cfqq = cfqd->active_queue;
        if (!cfqq)
                goto new_queue;

        /*
         * The active queue has run out of time, expire it and select new.
         */
        if (cfq_slice_used(cfqq))
                goto expire;

        /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                goto keep_queue;

        /*
         * No requests pending. If the active queue still has requests in
         * flight or is idling for a new request, allow either of these
         * conditions to happen (or time out) before selecting a new queue.
         */
        if (timer_pending(&cfqd->idle_slice_timer) ||
            (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
                cfqq = NULL;
                goto keep_queue;
        }

expire:
        cfq_slice_expired(cfqd, 0);
new_queue:
        cfqq = cfq_set_active_queue(cfqd);
keep_queue:
        return cfqq;
}
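/*
 * Worked example of cfq_prio_to_maxrq() (illustrative, with the default
 * cfq_slice_async_rq == 2 and CFQ_PRIO_LISTS == 8):
 *
 *	prio 0: 2 * (2 + 2 * 7) = 32 async requests per slice
 *	prio 4: 2 * (2 + 2 * 3) = 16
 *	prio 7: 2 * (2 + 2 * 0) =  4
 *
 * so an async queue's per-slice dispatch budget scales linearly with
 * its io priority.
 */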
/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                        int max_dispatch)
{
        int dispatched = 0;

        BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

        do {
                struct request *rq;

                /*
                 * follow expired path, else get first next available
                 */
                if ((rq = cfq_check_fifo(cfqq)) == NULL)
                        rq = cfqq->next_rq;

                /*
                 * finally, insert request into driver dispatch list
                 */
                cfq_dispatch_insert(cfqd->queue, rq);

                dispatched++;

                if (!cfqd->active_cic) {
                        atomic_inc(&RQ_CIC(rq)->ioc->refcount);
                        cfqd->active_cic = RQ_CIC(rq);
                }

                if (RB_EMPTY_ROOT(&cfqq->sort_list))
                        break;

        } while (dispatched < max_dispatch);

        /*
         * expire an async queue immediately if it has used up its slice.
         * idle queues always expire after 1 dispatch round.
         */
        if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
            dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
            cfq_class_idle(cfqq))) {
                cfqq->slice_end = jiffies + 1;
                cfq_slice_expired(cfqd, 0);
        }

        return dispatched;
}

static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
        int dispatched = 0;

        while (cfqq->next_rq) {
                cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
                dispatched++;
        }

        BUG_ON(!list_empty(&cfqq->fifo));
        return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
        int dispatched = 0;
        struct rb_node *n;

        while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
                struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);

                dispatched += __cfq_forced_dispatch_cfqq(cfqq);
        }

        cfq_slice_expired(cfqd, 0);

        BUG_ON(cfqd->busy_queues);

        return dispatched;
}

static int cfq_dispatch_requests(request_queue_t *q, int force)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
        int dispatched;

        if (!cfqd->busy_queues)
                return 0;

        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);

        dispatched = 0;
        while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
                int max_dispatch;

                max_dispatch = cfqd->cfq_quantum;
                if (cfq_class_idle(cfqq))
                        max_dispatch = 1;

                if (cfqq->dispatched >= max_dispatch) {
                        if (cfqd->busy_queues > 1)
                                break;
                        if (cfqq->dispatched >= 4 * max_dispatch)
                                break;
                }

                if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
                        break;

                cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_wait_request(cfqq);
                del_timer(&cfqd->idle_slice_timer);

                dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
        }

        return dispatched;
}
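/*
 * Dispatch budget illustration (with the default cfq_quantum == 4): a
 * BE/RT queue may have up to 4 requests in flight per round, but when it
 * is the only busy queue it is allowed to run up to 4 * 4 == 16 before
 * the loop backs off. Idle-class queues are capped at a single request
 * per round, and async dispatch is skipped entirely while sync requests
 * are still in flight (sync_flight > 0).
 */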
/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;

        BUG_ON(atomic_read(&cfqq->ref) <= 0);

        if (!atomic_dec_and_test(&cfqq->ref))
                return;

        BUG_ON(rb_first(&cfqq->sort_list));
        BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
        BUG_ON(cfq_cfqq_on_rr(cfqq));

        if (unlikely(cfqd->active_queue == cfqq)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
                cfq_schedule_dispatch(cfqd);
        }

        kmem_cache_free(cfq_pool, cfqq);
}

static void cfq_free_io_context(struct io_context *ioc)
{
        struct cfq_io_context *__cic;
        struct rb_node *n;
        int freed = 0;

        ioc->ioc_data = NULL;

        while ((n = rb_first(&ioc->cic_root)) != NULL) {
                __cic = rb_entry(n, struct cfq_io_context, rb_node);
                rb_erase(&__cic->rb_node, &ioc->cic_root);
                kmem_cache_free(cfq_ioc_pool, __cic);
                freed++;
        }

        elv_ioc_count_mod(ioc_count, -freed);

        if (ioc_gone && !elv_ioc_count_read(ioc_count))
                complete(ioc_gone);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        if (unlikely(cfqq == cfqd->active_queue)) {
                __cfq_slice_expired(cfqd, cfqq, 0);
                cfq_schedule_dispatch(cfqd);
        }

        cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
                                         struct cfq_io_context *cic)
{
        list_del_init(&cic->queue_list);
        smp_wmb();
        cic->key = NULL;

        if (cic->cfqq[ASYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
                cic->cfqq[ASYNC] = NULL;
        }

        if (cic->cfqq[SYNC]) {
                cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
                cic->cfqq[SYNC] = NULL;
        }
}

static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
        struct cfq_data *cfqd = cic->key;

        if (cfqd) {
                request_queue_t *q = cfqd->queue;

                spin_lock_irq(q->queue_lock);
                __cfq_exit_single_io_context(cfqd, cic);
                spin_unlock_irq(q->queue_lock);
        }
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
        struct cfq_io_context *__cic;
        struct rb_node *n;

        ioc->ioc_data = NULL;

        /*
         * put the reference this task is holding to the various queues
         */
        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
                __cic = rb_entry(n, struct cfq_io_context, rb_node);

                cfq_exit_single_io_context(__cic);
                n = rb_next(n);
        }
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
        struct cfq_io_context *cic;

        cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
        if (cic) {
                memset(cic, 0, sizeof(*cic));
                cic->last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->queue_list);
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
                elv_ioc_count_inc(ioc_count);
        }

        return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
        struct task_struct *tsk = current;
        int ioprio_class;

        if (!cfq_cfqq_prio_changed(cfqq))
                return;

        ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
        switch (ioprio_class) {
        default:
                printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
                /* fall through */
        case IOPRIO_CLASS_NONE:
                /*
                 * no prio set, place us in the middle of the BE classes
                 */
                if (tsk->policy == SCHED_IDLE)
                        goto set_class_idle;
                cfqq->ioprio = task_nice_ioprio(tsk);
                cfqq->ioprio_class = IOPRIO_CLASS_BE;
                break;
        case IOPRIO_CLASS_RT:
                cfqq->ioprio = task_ioprio(tsk);
                cfqq->ioprio_class = IOPRIO_CLASS_RT;
                break;
        case IOPRIO_CLASS_BE:
                cfqq->ioprio = task_ioprio(tsk);
                cfqq->ioprio_class = IOPRIO_CLASS_BE;
                break;
        case IOPRIO_CLASS_IDLE:
set_class_idle:
                cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
                cfqq->ioprio = 7;
                cfq_clear_cfqq_idle_window(cfqq);
                break;
        }

        /*
         * keep track of original prio settings in case we have to temporarily
         * elevate the priority of this queue
         */
        cfqq->org_ioprio = cfqq->ioprio;
        cfqq->org_ioprio_class = cfqq->ioprio_class;
        cfq_clear_cfqq_prio_changed(cfqq);
}

static inline void changed_ioprio(struct cfq_io_context *cic)
{
        struct cfq_data *cfqd = cic->key;
        struct cfq_queue *cfqq;
        unsigned long flags;

        if (unlikely(!cfqd))
                return;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        cfqq = cic->cfqq[ASYNC];
        if (cfqq) {
                struct cfq_queue *new_cfqq;
                new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
                                         GFP_ATOMIC);
                if (new_cfqq) {
                        cic->cfqq[ASYNC] = new_cfqq;
                        cfq_put_queue(cfqq);
                }
        }

        cfqq = cic->cfqq[SYNC];
        if (cfqq)
                cfq_mark_cfqq_prio_changed(cfqq);

        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
        struct cfq_io_context *cic;
        struct rb_node *n;

        ioc->ioprio_changed = 0;

        n = rb_first(&ioc->cic_root);
        while (n != NULL) {
                cic = rb_entry(n, struct cfq_io_context, rb_node);

                changed_ioprio(cic);
                n = rb_next(n);
        }
}
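/*
 * For context (see include/linux/ioprio.h): a task's ->ioprio packs the
 * class and the level into one word, class in the top bits, roughly
 *
 *	ioprio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4
 *
 * e.g. "ionice -c2 -n4" yields class IOPRIO_CLASS_BE, level 4, which is
 * what IOPRIO_PRIO_CLASS() and task_ioprio() unpack above.
 */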
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
              gfp_t gfp_mask)
{
        struct cfq_queue *cfqq, *new_cfqq = NULL;
        struct cfq_io_context *cic;

retry:
        cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
        /* cic always exists here */
        cfqq = cic_to_cfqq(cic, is_sync);

        if (!cfqq) {
                if (new_cfqq) {
                        cfqq = new_cfqq;
                        new_cfqq = NULL;
                } else if (gfp_mask & __GFP_WAIT) {
                        /*
                         * Inform the allocator of the fact that we will
                         * just repeat this allocation if it fails, to allow
                         * the allocator to do whatever it needs to attempt to
                         * free memory.
                         */
                        spin_unlock_irq(cfqd->queue->queue_lock);
                        new_cfqq = kmem_cache_alloc_node(cfq_pool,
                                        gfp_mask | __GFP_NOFAIL,
                                        cfqd->queue->node);
                        spin_lock_irq(cfqd->queue->queue_lock);
                        goto retry;
                } else {
                        cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask,
                                        cfqd->queue->node);
                        if (!cfqq)
                                goto out;
                }

                memset(cfqq, 0, sizeof(*cfqq));

                RB_CLEAR_NODE(&cfqq->rb_node);
                INIT_LIST_HEAD(&cfqq->fifo);

                atomic_set(&cfqq->ref, 0);
                cfqq->cfqd = cfqd;

                if (is_sync) {
                        cfq_mark_cfqq_idle_window(cfqq);
                        cfq_mark_cfqq_sync(cfqq);
                }

                cfq_mark_cfqq_prio_changed(cfqq);
                cfq_mark_cfqq_queue_new(cfqq);

                cfq_init_prio_data(cfqq);
        }

        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);

        atomic_inc(&cfqq->ref);
out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
        WARN_ON(!list_empty(&cic->queue_list));

        if (ioc->ioc_data == cic)
                ioc->ioc_data = NULL;

        rb_erase(&cic->rb_node, &ioc->cic_root);
        kmem_cache_free(cfq_ioc_pool, cic);
        elv_ioc_count_dec(ioc_count);
}

static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
        struct rb_node *n;
        struct cfq_io_context *cic;
        void *k, *key = cfqd;

        if (unlikely(!ioc))
                return NULL;

        /*
         * we maintain a last-hit cache, to avoid browsing over the tree
         */
        cic = ioc->ioc_data;
        if (cic && cic->key == cfqd)
                return cic;

restart:
        n = ioc->cic_root.rb_node;
        while (n) {
                cic = rb_entry(n, struct cfq_io_context, rb_node);
                /* ->key must be copied to avoid race with cfq_exit_queue() */
                k = cic->key;
                if (unlikely(!k)) {
                        cfq_drop_dead_cic(ioc, cic);
                        goto restart;
                }

                if (key < k)
                        n = n->rb_left;
                else if (key > k)
                        n = n->rb_right;
                else {
                        ioc->ioc_data = cic;
                        return cic;
                }
        }

        return NULL;
}
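/*
 * Relationship sketch: one io_context per task, with one cfq_io_context
 * per (task, cfq-managed device) pair hanging off it, keyed by the
 * cfq_data pointer:
 *
 *	task->io_context
 *	  cic_root rbtree -> cic { key = cfqd_A, cfqq[ASYNC], cfqq[SYNC] }
 *	                  -> cic { key = cfqd_B, ... }
 *
 * ioc->ioc_data caches the most recently used cic, so repeated io to
 * the same device skips the tree walk.
 */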
static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
             struct cfq_io_context *cic)
{
        struct rb_node **p;
        struct rb_node *parent;
        struct cfq_io_context *__cic;
        unsigned long flags;
        void *k;

        cic->ioc = ioc;
        cic->key = cfqd;

restart:
        parent = NULL;
        p = &ioc->cic_root.rb_node;
        while (*p) {
                parent = *p;
                __cic = rb_entry(parent, struct cfq_io_context, rb_node);
                /* ->key must be copied to avoid race with cfq_exit_queue() */
                k = __cic->key;
                if (unlikely(!k)) {
                        cfq_drop_dead_cic(ioc, __cic);
                        goto restart;
                }

                if (cic->key < k)
                        p = &(*p)->rb_left;
                else if (cic->key > k)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&cic->rb_node, parent, p);
        rb_insert_color(&cic->rb_node, &ioc->cic_root);

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
        list_add(&cic->queue_list, &cfqd->cic_list);
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        ioc = get_io_context(gfp_mask, cfqd->queue->node);
        if (!ioc)
                return NULL;

        cic = cfq_cic_rb_lookup(cfqd, ioc);
        if (cic)
                goto out;

        cic = cfq_alloc_io_context(cfqd, gfp_mask);
        if (cic == NULL)
                goto err;

        cfq_cic_link(cfqd, ioc, cic);
out:
        smp_read_barrier_depends();
        if (unlikely(ioc->ioprio_changed))
                cfq_ioc_set_ioprio(ioc);

        return cic;
err:
        put_io_context(ioc);
        return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
        unsigned long elapsed = jiffies - cic->last_end_request;
        unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

        cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
        cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
        cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
                       struct request *rq)
{
        sector_t sdist;
        u64 total;

        if (cic->last_request_pos < rq->sector)
                sdist = rq->sector - cic->last_request_pos;
        else
                sdist = cic->last_request_pos - rq->sector;

        if (!cic->seek_samples) {
                cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
                cfqd->new_seek_mean = cfqd->new_seek_total / 256;
        }

        /*
         * Don't allow the seek distance to get too large from the
         * odd fragment, pagein, etc
         */
        if (cic->seek_samples <= 60) /* second & third seek */
                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
        else
                sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

        cic->seek_samples = (7*cic->seek_samples + 256) / 8;
        cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
        total = cic->seek_total + (cic->seek_samples/2);
        do_div(total, cic->seek_samples);
        cic->seek_mean = (sector_t)total;
}

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                       struct cfq_io_context *cic)
{
        int enable_idle;

        if (!cfq_cfqq_sync(cfqq))
                return;

        enable_idle = cfq_cfqq_idle_window(cfqq);

        if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
            (cfqd->hw_tag && CIC_SEEKY(cic)))
                enable_idle = 0;
        else if (sample_valid(cic->ttime_samples)) {
                if (cic->ttime_mean > cfqd->cfq_slice_idle)
                        enable_idle = 0;
                else
                        enable_idle = 1;
        }

        if (enable_idle)
                cfq_mark_cfqq_idle_window(cfqq);
        else
                cfq_clear_cfqq_idle_window(cfqq);
}
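/*
 * Sketch of the exponentially weighted stats above: each update keeps
 * 7/8 of the old value and adds 1/8 of the new sample, with samples
 * scaled by 256 for fixed-point precision. E.g. with a steady
 * per-request thinktime of 4 jiffies, ttime_samples converges to 256
 * and ttime_total to 256 * 4, so
 *
 *	ttime_mean = (256*4 + 128) / 256 = 4
 *
 * sample_valid() (> 80) means roughly a third warmed up; a fresh cic
 * needs a handful of requests before its mean is trusted.
 */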
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0
 * for no; if we aren't sure, returning 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                   struct request *rq)
{
        struct cfq_queue *cfqq;

        cfqq = cfqd->active_queue;
        if (!cfqq)
                return 0;

        if (cfq_slice_used(cfqq))
                return 1;

        if (cfq_class_idle(new_cfqq))
                return 0;

        if (cfq_class_idle(cfqq))
                return 1;

        /*
         * if the new request is sync, but the currently running queue is
         * not, let the sync request have priority.
         */
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
                return 1;

        /*
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
        if (rq_is_meta(rq) && !cfqq->meta_pending)
                return 1;

        if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
                return 0;

        /*
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
        if (cfq_rq_close(cfqd, rq))
                return 1;

        return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        cfq_slice_expired(cfqd, 1);

        /*
         * Put the new queue at the front of the current list,
         * so we know that it will be selected next.
         */
        BUG_ON(!cfq_cfqq_on_rr(cfqq));

        cfq_service_tree_add(cfqd, cfqq, 1);

        cfqq->slice_end = 0;
        cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                struct request *rq)
{
        struct cfq_io_context *cic = RQ_CIC(rq);

        if (rq_is_meta(rq))
                cfqq->meta_pending++;

        cfq_update_io_thinktime(cfqd, cic);
        cfq_update_io_seektime(cfqd, cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);

        cic->last_request_pos = rq->sector + rq->nr_sectors;
        cfqq->last_request_pos = cic->last_request_pos;

        if (cfqq == cfqd->active_queue) {
                /*
                 * if we are waiting for a request for this queue, let it rip
                 * immediately and flag that we must not expire this queue
                 * just now
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        del_timer(&cfqd->idle_slice_timer);
                        blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired its mean thinktime or this new queue
                 * has some old slice time left and is of higher priority
                 */
                cfq_preempt_queue(cfqd, cfqq);
                cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
}

static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        cfq_init_prio_data(cfqq);

        cfq_add_rq_rb(rq);

        list_add_tail(&rq->queuelist, &cfqq->fifo);

        cfq_rq_enqueued(cfqd, cfqq, rq);
}
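/*
 * Preemption example: while an async queue is active, a sync read is
 * queued by another process. cfq_should_preempt() returns 1 (sync beats
 * async), so cfq_preempt_queue() expires the async slice, re-adds the
 * sync queue with add_front == 1 (rb_key 0, leftmost in the tree),
 * resets its slice, and blk_start_queueing() kicks the queue so the
 * read is dispatched immediately.
 */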
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);
        unsigned long now;

        now = jiffies;

        WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;

        if (cfq_cfqq_sync(cfqq))
                cfqd->sync_flight--;

        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;

        if (sync)
                RQ_CIC(rq)->last_end_request = now;

        /*
         * If this is the active queue, check if it needs to be expired,
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
                if (cfq_slice_used(cfqq))
                        cfq_slice_expired(cfqd, 1);
                else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
                        cfq_arm_slice_timer(cfqd);
        }

        if (!cfqd->rq_in_driver)
                cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
        if (has_fs_excl()) {
                /*
                 * boost idle prio on transactions that would lock out other
                 * users of the filesystem
                 */
                if (cfq_class_idle(cfqq))
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                if (cfqq->ioprio > IOPRIO_NORM)
                        cfqq->ioprio = IOPRIO_NORM;
        } else {
                /*
                 * check if we need to unboost the queue
                 */
                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
                        cfqq->ioprio_class = cfqq->org_ioprio_class;
                if (cfqq->ioprio != cfqq->org_ioprio)
                        cfqq->ioprio = cfqq->org_ioprio;
        }
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
            !cfq_cfqq_must_alloc_slice(cfqq)) {
                cfq_mark_cfqq_must_alloc_slice(cfqq);
                return ELV_MQUEUE_MUST;
        }

        return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(request_queue_t *q, int rw)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;

        /*
         * don't force setup of a queue from here, as a call to may_queue
         * does not necessarily imply that a request actually will be queued.
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
        cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
        if (!cic)
                return ELV_MQUEUE_MAY;

        cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
        if (cfqq) {
                cfq_init_prio_data(cfqq);
                cfq_prio_boost(cfqq);

                return __cfq_may_queue(cfqq);
        }

        return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq) {
                const int rw = rq_data_dir(rq);

                BUG_ON(!cfqq->allocated[rw]);
                cfqq->allocated[rw]--;

                put_io_context(RQ_CIC(rq)->ioc);

                rq->elevator_private = NULL;
                rq->elevator_private2 = NULL;

                cfq_put_queue(cfqq);
        }
}
/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
        const int is_sync = rq_is_sync(rq);
        struct cfq_queue *cfqq;
        unsigned long flags;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        cic = cfq_get_io_context(cfqd, gfp_mask);

        spin_lock_irqsave(q->queue_lock, flags);

        if (!cic)
                goto queue_fail;

        cfqq = cic_to_cfqq(cic, is_sync);
        if (!cfqq) {
                cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);

                if (!cfqq)
                        goto queue_fail;

                cic_set_cfqq(cic, cfqq, is_sync);
        }

        cfqq->allocated[rw]++;
        cfq_clear_cfqq_must_alloc(cfqq);
        atomic_inc(&cfqq->ref);

        spin_unlock_irqrestore(q->queue_lock, flags);

        rq->elevator_private = cic;
        rq->elevator_private2 = cfqq;
        return 0;

queue_fail:
        if (cic)
                put_io_context(cic->ioc);

        cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        return 1;
}

static void cfq_kick_queue(struct work_struct *work)
{
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        request_queue_t *q = cfqd->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        struct cfq_queue *cfqq;
        unsigned long flags;
        int timed_out = 1;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        if ((cfqq = cfqd->active_queue) != NULL) {
                timed_out = 0;

                /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
                        goto expire;

                /*
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
                if (!cfqd->busy_queues)
                        goto out_cont;

                /*
                 * not expired and it has a request pending, let it dispatch
                 */
                if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        goto out_kick;
                }
        }
expire:
        cfq_slice_expired(cfqd, timed_out);
out_kick:
        cfq_schedule_dispatch(cfqd);
out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        unsigned long flags, end;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        /*
         * race with a non-idle queue, reset timer
         */
        end = cfqd->last_end_request + CFQ_IDLE_GRACE;
        if (!time_after_eq(jiffies, end))
                mod_timer(&cfqd->idle_class_timer, end);
        else
                cfq_schedule_dispatch(cfqd);

        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
        del_timer_sync(&cfqd->idle_slice_timer);
        del_timer_sync(&cfqd->idle_class_timer);
        blk_sync_queue(cfqd->queue);
}

static void cfq_exit_queue(elevator_t *e)
{
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;

        cfq_shutdown_timer_wq(cfqd);

        spin_lock_irq(q->queue_lock);

        if (cfqd->active_queue)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);

        while (!list_empty(&cfqd->cic_list)) {
                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
                                                        struct cfq_io_context,
                                                        queue_list);

                __cfq_exit_single_io_context(cfqd, cic);
        }

        spin_unlock_irq(q->queue_lock);

        cfq_shutdown_timer_wq(cfqd);

        kfree(cfqd);
}

static void *cfq_init_queue(request_queue_t *q)
{
        struct cfq_data *cfqd;

        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd)
                return NULL;

        memset(cfqd, 0, sizeof(*cfqd));

        cfqd->service_tree = CFQ_RB_ROOT;
        INIT_LIST_HEAD(&cfqd->cic_list);

        cfqd->queue = q;

        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;

        init_timer(&cfqd->idle_class_timer);
        cfqd->idle_class_timer.function = cfq_idle_class_timer;
        cfqd->idle_class_timer.data = (unsigned long) cfqd;

        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
        cfqd->cfq_back_max = cfq_back_max;
        cfqd->cfq_back_penalty = cfq_back_penalty;
        cfqd->cfq_slice[0] = cfq_slice_async;
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;

        return cfqd;
}

static void cfq_slab_kill(void)
{
        if (cfq_pool)
                kmem_cache_destroy(cfq_pool);
        if (cfq_ioc_pool)
                kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
        cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto fail;

        cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
        if (!cfq_ioc_pool)
                goto fail;

        return 0;
fail:
        cfq_slab_kill();
        return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data = __VAR; \
        if (__CONV) \
                __data = jiffies_to_msecs(__data); \
        return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
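/*
 * These generated show helpers (together with the store side below) back
 * the tunables exported under /sys/block/<dev>/queue/iosched/ when cfq
 * is the active scheduler. Illustrative shell usage ("sda" is just an
 * example device; values are in ms where __CONV is set, plain integers
 * otherwise):
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	8
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */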
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data; \
        int ret = cfq_var_store(&__data, (page), count); \
        if (__data < (MIN)) \
                __data = (MIN); \
        else if (__data > (MAX)) \
                __data = (MAX); \
        if (__CONV) \
                *(__PTR) = msecs_to_jiffies(__data); \
        else \
                *(__PTR) = __data; \
        return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
               UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
               UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
               UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
               UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(quantum),
        CFQ_ATTR(fifo_expire_sync),
        CFQ_ATTR(fifo_expire_async),
        CFQ_ATTR(back_seek_max),
        CFQ_ATTR(back_seek_penalty),
        CFQ_ATTR(slice_sync),
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
        __ATTR_NULL
};

static struct elevator_type iosched_cfq = {
        .ops = {
                .elevator_merge_fn = cfq_merge,
                .elevator_merged_fn = cfq_merged_request,
                .elevator_merge_req_fn = cfq_merged_requests,
                .elevator_allow_merge_fn = cfq_allow_merge,
                .elevator_dispatch_fn = cfq_dispatch_requests,
                .elevator_add_req_fn = cfq_insert_request,
                .elevator_activate_req_fn = cfq_activate_request,
                .elevator_deactivate_req_fn = cfq_deactivate_request,
                .elevator_queue_empty_fn = cfq_queue_empty,
                .elevator_completed_req_fn = cfq_completed_request,
                .elevator_former_req_fn = elv_rb_former_request,
                .elevator_latter_req_fn = elv_rb_latter_request,
                .elevator_set_req_fn = cfq_set_request,
                .elevator_put_req_fn = cfq_put_request,
                .elevator_may_queue_fn = cfq_may_queue,
                .elevator_init_fn = cfq_init_queue,
                .elevator_exit_fn = cfq_exit_queue,
                .trim = cfq_free_io_context,
        },
        .elevator_attrs = cfq_attrs,
        .elevator_name = "cfq",
        .elevator_owner = THIS_MODULE,
};

static int __init cfq_init(void)
{
        int ret;

        /*
         * could be 0 on HZ < 1000 setups
         */
        if (!cfq_slice_async)
                cfq_slice_async = 1;
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;

        if (cfq_slab_setup())
                return -ENOMEM;

        ret = elv_register(&iosched_cfq);
        if (ret)
                cfq_slab_kill();

        return ret;
}

static void __exit cfq_exit(void)
{
        DECLARE_COMPLETION_ONSTACK(all_gone);
        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(ioc_gone);
        synchronize_rcu();
        cfq_slab_kill();
}
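/*
 * Once the module is loaded and elv_register() has run, the scheduler
 * can be selected per device at runtime, e.g. (illustrative shell, with
 * "sda" as an example device):
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *
 * or system-wide at boot with the "elevator=cfq" kernel parameter.
 */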
module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");