Directory: /netgear-WNDR4500-V1.0.1.40_1.0.68/src/linux/linux-2.6/block/

Lines Matching refs:rq

41 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
238 static void rq_init(request_queue_t *q, struct request *rq)
240 INIT_LIST_HEAD(&rq->queuelist);
241 INIT_LIST_HEAD(&rq->donelist);
243 rq->errors = 0;
244 rq->bio = rq->biotail = NULL;
245 INIT_HLIST_NODE(&rq->hash);
246 RB_CLEAR_NODE(&rq->rb_node);
247 rq->ioprio = 0;
248 rq->buffer = NULL;
249 rq->ref_count = 1;
250 rq->q = q;
251 rq->special = NULL;
252 rq->data_len = 0;
253 rq->data = NULL;
254 rq->nr_phys_segments = 0;
255 rq->sense = NULL;
256 rq->end_io = NULL;
257 rq->end_io_data = NULL;
258 rq->completion_data = NULL;
265 * @prepare_flush_fn: rq setup helper for cache flush ordered writes
330 unsigned blk_ordered_req_seq(struct request *rq)
332 request_queue_t *q = rq->q;
336 if (rq == &q->pre_flush_rq)
338 if (rq == &q->bar_rq)
340 if (rq == &q->post_flush_rq)
349 if (!blk_fs_request(rq))
352 if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
361 struct request *rq;
376 rq = q->orig_bar_rq;
381 end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
382 end_that_request_last(rq, uptodate);
385 static void pre_flush_end_io(struct request *rq, int error)
387 elv_completed_request(rq->q, rq);
388 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
391 static void bar_end_io(struct request *rq, int error)
393 elv_completed_request(rq->q, rq);
394 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
397 static void post_flush_end_io(struct request *rq, int error)
399 elv_completed_request(rq->q, rq);
400 blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
405 struct request *rq;
409 rq = &q->pre_flush_rq;
412 rq = &q->post_flush_rq;
416 rq->cmd_flags = REQ_HARDBARRIER;
417 rq_init(q, rq);
418 rq->elevator_private = NULL;
419 rq->elevator_private2 = NULL;
420 rq->rq_disk = q->bar_rq.rq_disk;
421 rq->end_io = end_io;
422 q->prepare_flush_fn(q, rq);
424 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
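
Lines 405-424 (queue_flush) build the pre-/post-flush requests and hand each one to the driver through q->prepare_flush_fn before inserting it at the front of the dispatch queue. A minimal driver-side sketch of wiring up that hook with blk_queue_ordered(); the mydev_* names, the REQ_TYPE_FLUSH marking and the QUEUE_ORDERED_DRAIN_FLUSH choice are assumptions of this sketch, not taken from the file:

    #include <linux/blkdev.h>

    /* Hypothetical flush hook: queue_flush() calls this for q->pre_flush_rq
     * and q->post_flush_rq so the driver can turn the request into a real
     * cache-flush command for its hardware. */
    static void mydev_prepare_flush(request_queue_t *q, struct request *rq)
    {
            rq->cmd_type = REQ_TYPE_FLUSH;  /* not a file-system request */
            /* fill in driver-specific flush command bytes in rq->cmd[] here */
    }

    static int mydev_init_ordered(request_queue_t *q)
    {
            /* Advertise barrier support: drain the queue, flush the write
             * cache before and after the barrier write. */
            return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                     mydev_prepare_flush);
    }
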
428 struct request *rq)
438 blkdev_dequeue_request(rq);
439 q->orig_bar_rq = rq;
440 rq = &q->bar_rq;
441 rq->cmd_flags = 0;
442 rq_init(q, rq);
444 rq->cmd_flags |= REQ_RW;
445 rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
446 rq->elevator_private = NULL;
447 rq->elevator_private2 = NULL;
448 init_request_from_bio(rq, q->orig_bar_rq->bio);
449 rq->end_io = bar_end_io;
462 elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
466 rq = &q->pre_flush_rq;
473 rq = NULL;
475 return rq;
480 struct request *rq = *rqp;
481 int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
488 *rqp = start_ordered(q, rq);
495 blkdev_dequeue_request(rq);
496 end_that_request_first(rq, -EOPNOTSUPP,
497 rq->hard_nr_sectors);
498 end_that_request_last(rq, -EOPNOTSUPP);
509 if (!blk_fs_request(rq) &&
510 rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
515 if (is_barrier && rq != &q->bar_rq)
519 WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
520 if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
559 static int ordered_bio_endio(struct request *rq, struct bio *bio,
562 request_queue_t *q = rq->q;
566 if (&q->bar_rq != rq)
1059 * @rq: the request that has completed
1070 void blk_queue_end_tag(request_queue_t *q, struct request *rq)
1073 int tag = rq->tag;
1086 list_del_init(&rq->queuelist);
1087 rq->cmd_flags &= ~REQ_QUEUED;
1088 rq->tag = -1;
1103 * @rq: the block request that needs tagging
1118 int blk_queue_start_tag(request_queue_t *q, struct request *rq)
1123 if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
1126 __FUNCTION__, rq,
1127 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
1142 rq->cmd_flags |= REQ_QUEUED;
1143 rq->tag = tag;
1144 bqt->tag_index[tag] = rq;
1145 blkdev_dequeue_request(rq);
1146 list_add(&rq->queuelist, &bqt->busy_list);
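
blk_queue_start_tag() (lines 1118-1146) assigns a tag, dequeues the request and puts it on the tag busy list; blk_queue_end_tag() (lines 1070-1088) undoes that on completion. A rough sketch of how a tag-capable driver's request function might use the pair; the mydev_* names and the tag setup are assumptions:

    #include <linux/blkdev.h>

    /* Hypothetical: program the hardware with rq->tag, rq->sector, ... */
    static void mydev_issue(struct request *rq)
    {
    }

    /* request_fn for a driver using block-layer tagging; assumes
     * blk_queue_init_tags(q, depth, NULL) succeeded at setup time. */
    static void mydev_request_fn(request_queue_t *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    /* Assigns rq->tag and moves rq to the tag busy list
                     * (see blk_queue_start_tag() above). */
                    if (blk_queue_start_tag(q, rq))
                            break;          /* no free tag, retry later */

                    mydev_issue(rq);
            }
    }

    /* On the completion interrupt: blk_queue_end_tag(q, rq) to release the
     * tag, then finish the request with end_that_request_first()/_last(). */
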
1169 struct request *rq;
1172 rq = list_entry_rq(tmp);
1174 if (rq->tag == -1) {
1177 list_del_init(&rq->queuelist);
1178 rq->cmd_flags &= ~REQ_QUEUED;
1180 blk_queue_end_tag(q, rq);
1182 rq->cmd_flags &= ~REQ_STARTED;
1183 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1189 void blk_dump_rq_flags(struct request *rq, char *msg)
1194 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
1195 rq->cmd_flags);
1197 printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
1198 rq->nr_sectors,
1199 rq->current_nr_sectors);
1200 printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
1202 if (blk_pc_request(rq)) {
1204 for (bit = 0; bit < sizeof(rq->cmd); bit++)
1205 printk("%02x ", rq->cmd[bit]);
1313 * must make sure sg can hold rq->nr_phys_segments entries
1315 int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
1325 * for each bio in rq
1328 rq_for_each_bio(bio, rq) {
1356 } /* bios in rq */
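
Per the comment at line 1313, the scatterlist passed to blk_rq_map_sg() must be able to hold rq->nr_phys_segments entries. A hedged sketch of the usual caller pattern; MYDEV_MAX_SEGS and the mydev_* names are assumptions:

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    /* MYDEV_MAX_SEGS is an assumption; it must be at least as large as
     * rq->nr_phys_segments can get (i.e. >= q->max_phys_segments). */
    #define MYDEV_MAX_SEGS 128
    static struct scatterlist mydev_sg[MYDEV_MAX_SEGS];

    static int mydev_build_sg(request_queue_t *q, struct request *rq)
    {
            /* Collapse the request's bios into scatterlist entries;
             * returns the number of entries actually filled in. */
            int nents = blk_rq_map_sg(q, rq, mydev_sg);

            /* mydev_sg[0..nents-1] can now be handed to the DMA engine
             * (dma_map_sg() etc.). */
            return nents;
    }
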
1626 q->rq.count[READ] + q->rq.count[WRITE]);
1637 q->rq.count[READ] + q->rq.count[WRITE]);
1647 q->rq.count[READ] + q->rq.count[WRITE]);
1770 struct request_list *rl = &q->rq;
1807 struct request_list *rl = &q->rq;
1959 static inline void blk_free_request(request_queue_t *q, struct request *rq)
1961 if (rq->cmd_flags & REQ_ELVPRIV)
1962 elv_put_request(q, rq);
1963 mempool_free(rq, q->rq.rq_pool);
1969 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1971 if (!rq)
1975 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
1978 rq->cmd_flags = rw | REQ_ALLOCED;
1981 if (unlikely(elv_set_request(q, rq, gfp_mask))) {
1982 mempool_free(rq, q->rq.rq_pool);
1985 rq->cmd_flags |= REQ_ELVPRIV;
1988 return rq;
2027 struct request_list *rl = &q->rq;
2046 struct request_list *rl = &q->rq;
2067 struct request *rq = NULL;
2068 struct request_list *rl = &q->rq;
2121 rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
2122 if (unlikely(!rq)) {
2138 * rq mempool into READ and WRITE
2156 rq_init(q, rq);
2160 return rq;
2173 struct request *rq;
2175 rq = get_request(q, rw_flags, bio, GFP_NOIO);
2176 while (!rq) {
2178 struct request_list *rl = &q->rq;
2183 rq = get_request(q, rw_flags, bio, GFP_NOIO);
2185 if (!rq) {
2208 return rq;
2213 struct request *rq;
2219 rq = get_request_wait(q, rw, NULL);
2221 rq = get_request(q, rw, NULL, gfp_mask);
2222 if (!rq)
2227 return rq;
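
blk_get_request() (lines 2213-2227) either sleeps in get_request_wait() or, without __GFP_WAIT, falls back to get_request() and may return NULL when the q->rq pool is exhausted. A small allocation sketch under that assumption; mydev_try_get_rq is illustrative:

    #include <linux/blkdev.h>

    /* Non-blocking allocation: without __GFP_WAIT, blk_get_request() uses
     * get_request() only and returns NULL when the request pool is empty. */
    static struct request *mydev_try_get_rq(request_queue_t *q, int rw)
    {
            struct request *rq = blk_get_request(q, rw, GFP_ATOMIC);

            if (!rq)
                    return NULL;    /* pool exhausted, caller retries later */

            /* ... fill in the request and issue it ... */
            return rq;
    }

    /* A request that is not completed through end_that_request_last() is
     * released with blk_put_request(rq). */
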
2253 * @rq: request to be inserted
2260 void blk_requeue_request(request_queue_t *q, struct request *rq)
2262 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
2264 if (blk_rq_tagged(rq))
2265 blk_queue_end_tag(q, rq);
2267 elv_requeue_request(q, rq);
2275 * @rq: request to be inserted
2291 void blk_insert_request(request_queue_t *q, struct request *rq,
2302 rq->cmd_type = REQ_TYPE_SPECIAL;
2303 rq->cmd_flags |= REQ_SOFTBARRIER;
2305 rq->special = data;
2312 if (blk_rq_tagged(rq))
2313 blk_queue_end_tag(q, rq);
2315 drive_stat_acct(rq, rq->nr_sectors, 1);
2316 __elv_add_request(q, rq, where, 0);
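
blk_insert_request() (lines 2291-2316) marks a request REQ_TYPE_SPECIAL, stores the caller's payload in rq->special and pushes it past the I/O scheduler. A hedged sketch of a driver-private command issued that way; struct mydev_cmd and mydev_send_special are assumptions:

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Hypothetical driver-private command pushed past the I/O scheduler. */
    struct mydev_cmd {
            int opcode;
    };

    static int mydev_send_special(request_queue_t *q, struct mydev_cmd *cmd)
    {
            struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

            if (!rq)
                    return -ENODEV;

            /* Sets REQ_TYPE_SPECIAL, stores cmd in rq->special and queues
             * the request at the head of the dispatch list. */
            blk_insert_request(q, rq, 1, cmd);
            return 0;
    }

    /* In the driver's request_fn such a request tests true for
     * blk_special_request(rq) and carries the payload in rq->special. */
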
2337 static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
2344 reading = rq_data_dir(rq) == READ;
2368 if (!rq->bio)
2369 blk_rq_bio_prep(q, rq, bio);
2370 else if (!ll_back_merge_fn(q, rq, bio)) {
2374 rq->biotail->bi_next = bio;
2375 rq->biotail = bio;
2377 rq->data_len += bio->bi_size;
2393 * @rq: request structure to fill
2410 int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
2438 ret = __blk_rq_map_user(q, rq, ubuf, map_len);
2442 bio = rq->bio;
2447 rq->buffer = rq->data = NULL;
2459 * @rq: request to map data to
2477 int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
2488 bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
2499 blk_rq_bio_prep(q, rq, bio);
2500 rq->buffer = rq->data = NULL;
2511 * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
2512 * supply the original rq->bio from the blk_rq_map_user() return, since
2513 * the io completion may have changed rq->bio.
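
The comment at lines 2511-2513 notes that the caller of blk_rq_map_user() must remember the original rq->bio, because completion may change rq->bio before the buffer is unmapped. A small sketch of that bookkeeping; mydev_map_user is an assumption, and the later blk_rq_unmap_user() call is only indicated in a comment since its exact signature varies across 2.6 releases:

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Map a user buffer into rq and remember the original bio for the later
     * unmap, as required by the comment at lines 2511-2513. */
    static int mydev_map_user(request_queue_t *q, struct request *rq,
                              void __user *ubuf, unsigned long len,
                              struct bio **orig_bio)
    {
            int ret = blk_rq_map_user(q, rq, ubuf, len);

            if (ret)
                    return ret;

            *orig_bio = rq->bio;    /* completion may change rq->bio */

            /* ... issue rq (e.g. blk_execute_rq()), then release the mapping
             * with blk_rq_unmap_user() on *orig_bio ... */
            return 0;
    }
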
2542 * @rq: request to fill
2547 int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
2561 if (rq_data_dir(rq) == WRITE)
2564 blk_rq_bio_prep(q, rq, bio);
2565 blk_queue_bounce(q, &rq->bio);
2566 rq->buffer = rq->data = NULL;
2576 * @rq: request to insert
2585 struct request *rq, int at_head,
2590 rq->rq_disk = bd_disk;
2591 rq->cmd_flags |= REQ_NOMERGE;
2592 rq->end_io = done;
2595 __elv_add_request(q, rq, where, 1);
2605 * @rq: request to insert
2613 struct request *rq, int at_head)
2623 rq->ref_count++;
2625 if (!rq->sense) {
2627 rq->sense = sense;
2628 rq->sense_len = 0;
2631 rq->end_io_data = &wait;
2632 blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
2635 if (rq->errors)
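
blk_execute_rq() (lines 2613-2635) adds a sense buffer and a completion, then delegates to blk_execute_rq_nowait() with blk_end_sync_rq as the end_io hook. A hedged sketch of the common synchronous passthrough built on it together with blk_rq_map_kern(); mydev_sync_cmd and its parameters are assumptions:

    #include <linux/blkdev.h>
    #include <linux/errno.h>

    /* Synchronous passthrough of a kernel buffer; 'disk', 'buf' and 'len'
     * are parameters of this sketch, not of the file above. */
    static int mydev_sync_cmd(request_queue_t *q, struct gendisk *disk,
                              void *buf, unsigned int len)
    {
            struct request *rq;
            int err;

            rq = blk_get_request(q, READ, GFP_KERNEL);
            if (!rq)
                    return -ENODEV;

            err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
            if (err)
                    goto out;

            /* fill in rq->cmd[], rq->cmd_len, rq->timeout for the device */

            /* Sleeps until rq->end_io runs; returns -EIO if rq->errors. */
            err = blk_execute_rq(q, disk, rq, 0);
    out:
            blk_put_request(rq);
            return err;
    }
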
2671 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2673 int rw = rq_data_dir(rq);
2675 if (!blk_fs_request(rq) || !rq->rq_disk)
2679 __disk_stat_inc(rq->rq_disk, merges[rw]);
2681 disk_round_stats(rq->rq_disk);
2682 rq->rq_disk->in_flight++;
2748 * it didn't come out of our reserved rq pools
2784 * @rq: request to complete
2787 void blk_end_sync_rq(struct request *rq, int error)
2789 struct completion *waiting = rq->end_io_data;
2791 rq->end_io_data = NULL;
2792 __blk_put_request(rq->q, rq);
2796 * the rq pointer) could be invalid right after this complete()
2824 * from next to rq and release next. merge_requests_fn
2858 static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2860 struct request *next = elv_latter_request(q, rq);
2863 return attempt_merge(q, rq, next);
2868 static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
2870 struct request *prev = elv_former_request(q, rq);
2873 return attempt_merge(q, prev, rq);
2995 * rq allocator and io schedulers.
3314 static void blk_recalc_rq_segments(struct request *rq)
3319 request_queue_t *q = rq->q;
3321 if (!rq->bio)
3325 rq_for_each_bio(bio, rq) {
3352 rq->nr_phys_segments = nr_phys_segs;
3353 rq->nr_hw_segments = nr_hw_segs;
3356 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
3358 if (blk_fs_request(rq)) {
3359 rq->hard_sector += nsect;
3360 rq->hard_nr_sectors -= nsect;
3365 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
3366 (rq->sector <= rq->hard_sector)) {
3367 rq->sector = rq->hard_sector;
3368 rq->nr_sectors = rq->hard_nr_sectors;
3369 rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
3370 rq->current_nr_sectors = rq->hard_cur_sectors;
3371 rq->buffer = bio_data(rq->bio);
3378 if (rq->nr_sectors < rq->current_nr_sectors) {
3380 rq->nr_sectors = rq->current_nr_sectors;
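
blk_recalc_rq_sectors()/blk_recalc_rq_segments() (lines 3314-3380) are what advance a request after a partial completion via end_that_request_first(). The usual driver-side completion pattern looks roughly like this; mydev_complete, uptodate and nsect are assumptions, and the request is assumed to be dequeued with q->queue_lock held for end_that_request_last():

    #include <linux/blkdev.h>

    /* Finish 'nsect' sectors of rq.  end_that_request_first() returns nonzero
     * while sectors remain; in that case rq has been advanced (hard_sector,
     * nr_sectors, buffer recalculated as in blk_recalc_rq_sectors() above)
     * and the driver keeps transferring. */
    static void mydev_complete(struct request *rq, int uptodate, int nsect)
    {
            if (end_that_request_first(rq, uptodate, nsect))
                    return;                         /* more I/O left on rq */

            end_that_request_last(rq, uptodate);    /* done: run end_io, free rq */
    }
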
3552 struct request *rq = list_entry(local_list.next, struct request, donelist);
3554 list_del_init(&rq->donelist);
3555 rq->q->softirq_done_fn(rq);
3665 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
3667 /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
3668 rq->cmd_flags |= (bio->bi_rw & 3);
3670 rq->nr_phys_segments = bio_phys_segments(q, bio);
3671 rq->nr_hw_segments = bio_hw_segments(q, bio);
3672 rq->current_nr_sectors = bio_cur_sectors(bio);
3673 rq->hard_cur_sectors = rq->current_nr_sectors;
3674 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
3675 rq->buffer = bio_data(bio);
3676 rq->data_len = bio->bi_size;
3678 rq->bio = rq->biotail = bio;
3882 struct request_list *rl = &q->rq;