Lines Matching refs:rq

54 #define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
58 * merged with rq.
60 static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
62 struct request_queue *q = rq->q;
66 return e->type->ops.allow_merge(q, rq, bio);
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
76 if (!blk_rq_merge_ok(rq, bio))
79 if (!elv_iosched_allow_bio_merge(rq, bio))
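
The fragments above show the two-step gate for merging a bio into rq: the generic blk_rq_merge_ok() test, then the scheduler's ops.allow_merge hook if one is defined. The rq_hash_key() macro keys a request by its end position (start + length), which is what makes back-merge lookups possible: a bio starting at sector S can only be appended to a request that ends at S. A minimal userspace sketch of that keying; the types and names below (fake_request, back_merge_candidate) are illustrative, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned long long sector_t;

	struct fake_request {
		sector_t pos;		/* first sector, like blk_rq_pos() */
		sector_t nr_sectors;	/* length, like blk_rq_sectors() */
	};

	/* Mirrors rq_hash_key(): the sector just past the request's last one. */
	static sector_t hash_key(const struct fake_request *rq)
	{
		return rq->pos + rq->nr_sectors;
	}

	/* A bio starting at 'bio_sector' can only be back-merged onto a
	 * request whose end sector equals that start sector. */
	static bool back_merge_candidate(const struct fake_request *rq,
					 sector_t bio_sector)
	{
		return hash_key(rq) == bio_sector;
	}

	int main(void)
	{
		struct fake_request rq = { .pos = 100, .nr_sectors = 8 };

		printf("key=%llu bio@108=%d bio@100=%d\n", hash_key(&rq),
		       back_merge_candidate(&rq, 108),
		       back_merge_candidate(&rq, 100));
		return 0;
	}
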
166 static inline void __elv_rqhash_del(struct request *rq)
168 hash_del(&rq->hash);
169 rq->rq_flags &= ~RQF_HASHED;
172 void elv_rqhash_del(struct request_queue *q, struct request *rq)
174 if (ELV_ON_HASH(rq))
175 __elv_rqhash_del(rq);
179 void elv_rqhash_add(struct request_queue *q, struct request *rq)
183 BUG_ON(ELV_ON_HASH(rq));
184 hash_add(e->hash, &rq->hash, rq_hash_key(rq));
185 rq->rq_flags |= RQF_HASHED;
189 void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
191 __elv_rqhash_del(rq);
192 elv_rqhash_add(q, rq);
199 struct request *rq;
201 hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
202 BUG_ON(!ELV_ON_HASH(rq));
204 if (unlikely(!rq_mergeable(rq))) {
205 __elv_rqhash_del(rq);
209 if (rq_hash_key(rq) == offset)
210 return rq;
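
The elv_rqhash_* helpers maintain a hash table keyed by that end sector: elv_rqhash_find() returns a queued request whose key matches the requested offset (dropping entries that are no longer mergeable), and elv_rqhash_reposition() is simply delete plus re-add, needed whenever a merge has moved the request's end sector. A userspace sketch of the same idea, assuming a small chained table with hlist-style pprev links so deletion never depends on the (possibly stale) key; bucket count and names are mine, not the kernel's:

	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long long sector_t;

	#define NR_BUCKETS 64

	struct sketch_request {
		sector_t pos, nr_sectors;
		struct sketch_request *hash_next;	/* next entry in bucket */
		struct sketch_request **hash_pprev;	/* &previous link */
	};

	static struct sketch_request *buckets[NR_BUCKETS];

	static sector_t hash_key(const struct sketch_request *rq)
	{
		return rq->pos + rq->nr_sectors;	/* end sector */
	}

	static void rqhash_add(struct sketch_request *rq)
	{
		struct sketch_request **head = &buckets[hash_key(rq) % NR_BUCKETS];

		rq->hash_next = *head;
		if (*head)
			(*head)->hash_pprev = &rq->hash_next;
		rq->hash_pprev = head;
		*head = rq;
	}

	static void rqhash_del(struct sketch_request *rq)
	{
		*rq->hash_pprev = rq->hash_next;
		if (rq->hash_next)
			rq->hash_next->hash_pprev = rq->hash_pprev;
	}

	/* Like elv_rqhash_reposition(): the request grew, so its key moved. */
	static void rqhash_reposition(struct sketch_request *rq)
	{
		rqhash_del(rq);
		rqhash_add(rq);
	}

	/* Like elv_rqhash_find(): a request ending exactly at 'offset'. */
	static struct sketch_request *rqhash_find(sector_t offset)
	{
		struct sketch_request *rq;

		for (rq = buckets[offset % NR_BUCKETS]; rq; rq = rq->hash_next)
			if (hash_key(rq) == offset)
				return rq;
		return NULL;
	}

	int main(void)
	{
		struct sketch_request rq = { .pos = 100, .nr_sectors = 8 };

		rqhash_add(&rq);
		printf("find(108) -> %p\n", (void *)rqhash_find(108));

		rq.nr_sectors += 8;	/* a bio was appended: end sector moved */
		rqhash_reposition(&rq);
		printf("find(116) -> %p, find(108) -> %p\n",
		       (void *)rqhash_find(116), (void *)rqhash_find(108));
		return 0;
	}
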
220 void elv_rb_add(struct rb_root *root, struct request *rq)
230 if (blk_rq_pos(rq) < blk_rq_pos(__rq))
232 else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
236 rb_link_node(&rq->rb_node, parent, p);
237 rb_insert_color(&rq->rb_node, root);
241 void elv_rb_del(struct rb_root *root, struct request *rq)
243 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
244 rb_erase(&rq->rb_node, root);
245 RB_CLEAR_NODE(&rq->rb_node);
252 struct request *rq;
255 rq = rb_entry(n, struct request, rb_node);
257 if (sector < blk_rq_pos(rq))
259 else if (sector > blk_rq_pos(rq))
262 return rq;
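
elv_rb_add() and elv_rb_find() keep requests sorted by start sector (blk_rq_pos): smaller keys go left, equal or larger keys go right, and the lookup returns a request that starts exactly at the given sector. That is typically the front-merge counterpart of the hash lookup, since a bio ending at sector S can only be prepended to a request starting at S. A sketch of the same ordering over a plain, unbalanced binary search tree (the kernel balances via rb_link_node()/rb_insert_color(), which is omitted here; all names are illustrative):

	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long long sector_t;

	struct sketch_request {
		sector_t pos;			/* start sector, like blk_rq_pos() */
		struct sketch_request *rb_left, *rb_right;
	};

	/* Mirrors elv_rb_add(): smaller start sectors left, others right. */
	static void rb_add(struct sketch_request **root, struct sketch_request *rq)
	{
		while (*root) {
			if (rq->pos < (*root)->pos)
				root = &(*root)->rb_left;
			else
				root = &(*root)->rb_right;
		}
		rq->rb_left = rq->rb_right = NULL;
		*root = rq;
	}

	/* Mirrors elv_rb_find(): a request starting exactly at 'sector'. */
	static struct sketch_request *rb_find(struct sketch_request *root,
					      sector_t sector)
	{
		while (root) {
			if (sector < root->pos)
				root = root->rb_left;
			else if (sector > root->pos)
				root = root->rb_right;
			else
				return root;
		}
		return NULL;
	}

	int main(void)
	{
		struct sketch_request a = { .pos = 100 }, b = { .pos = 200 },
				      c = { .pos = 50 };
		struct sketch_request *root = NULL;

		rb_add(&root, &a);
		rb_add(&root, &b);
		rb_add(&root, &c);
		printf("find(200) -> %llu, find(123) -> %p\n",
		       rb_find(root, 200)->pos, (void *)rb_find(root, 123));
		return 0;
	}
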
319 * we can append 'rq' to an existing request, so we can throw 'rq' away
325 bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
337 if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
338 list_add(&rq->queuelist, free);
350 __rq = elv_rqhash_find(q, blk_rq_pos(rq));
351 if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
354 list_add(&rq->queuelist, free);
357 rq = __rq;
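
elv_attempt_insert_merge() first tries to append rq to the cached q->last_merge hint, then falls back to the hash lookup at blk_rq_pos(rq), i.e. a queued request that ends exactly where rq begins. Each time a merge succeeds, the absorbed request goes onto the caller's 'free' list and the loop continues with the surviving request, so chains of adjacent requests collapse. A userspace sketch of that control flow; find_ending_at() stands in for the hash lookup and attempt_merge() for blk_attempt_req_merge(), and the array-backed queue and all names are mine:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long long sector_t;

	struct sketch_request {
		sector_t pos, nr_sectors;
		bool queued;		/* false once it would sit on 'free' */
	};

	static struct sketch_request queue[16];
	static int nr_queued;
	static struct sketch_request *last_merge;	/* like q->last_merge */

	/* Stand-in for elv_rqhash_find(q, blk_rq_pos(rq)). */
	static struct sketch_request *find_ending_at(sector_t sector)
	{
		for (int i = 0; i < nr_queued; i++)
			if (queue[i].queued &&
			    queue[i].pos + queue[i].nr_sectors == sector)
				return &queue[i];
		return NULL;
	}

	/* Stand-in for blk_attempt_req_merge(): append rq onto __rq. */
	static bool attempt_merge(struct sketch_request *__rq,
				  struct sketch_request *rq)
	{
		if (__rq == rq || __rq->pos + __rq->nr_sectors != rq->pos)
			return false;
		__rq->nr_sectors += rq->nr_sectors;
		rq->queued = false;	/* rq would move to the free list */
		return true;
	}

	static bool attempt_insert_merge(struct sketch_request *rq)
	{
		bool ret = false;

		/* Fast path: the request most recently involved in a merge. */
		if (last_merge && attempt_merge(last_merge, rq))
			return true;

		/* Back-merge lookup; the survivor may itself merge again. */
		while (1) {
			struct sketch_request *__rq = find_ending_at(rq->pos);

			if (!__rq || !attempt_merge(__rq, rq))
				break;
			ret = true;
			rq = __rq;
		}
		return ret;
	}

	int main(void)
	{
		queue[nr_queued++] = (struct sketch_request){ .pos = 92,
					.nr_sectors = 8, .queued = true };
		queue[nr_queued++] = (struct sketch_request){ .pos = 100,
					.nr_sectors = 8, .queued = true };

		struct sketch_request incoming = { .pos = 108, .nr_sectors = 8,
						   .queued = true };

		printf("merged=%d, request at 92 now spans %llu sectors\n",
		       attempt_insert_merge(&incoming), queue[0].nr_sectors);
		return 0;
	}
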
363 void elv_merged_request(struct request_queue *q, struct request *rq,
369 e->type->ops.request_merged(q, rq, type);
372 elv_rqhash_reposition(q, rq);
374 q->last_merge = rq;
377 void elv_merge_requests(struct request_queue *q, struct request *rq,
383 e->type->ops.requests_merged(q, rq, next);
385 elv_rqhash_reposition(q, rq);
386 q->last_merge = rq;
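
After a merge, elv_merged_request() (single bio merged into rq) and elv_merge_requests() (next merged into rq) do the same bookkeeping: notify the scheduler hook, reposition rq in the hash, and cache rq as q->last_merge. The reposition matters for back merges, because appending moves the request's end sector, which is the hash key; prepending leaves the key unchanged (in the kernel this is distinguished via the ELEVATOR_BACK_MERGE/ELEVATOR_FRONT_MERGE type). A small sketch of that bookkeeping, with an illustrative enum and a stubbed reposition helper; none of this is kernel code:

	#include <stdio.h>

	typedef unsigned long long sector_t;

	enum merge_type { BACK_MERGE, FRONT_MERGE };	/* illustrative names */

	struct sketch_request {
		sector_t pos, nr_sectors;
	};

	static struct sketch_request *last_merge;	/* like q->last_merge */

	/* Stand-in for elv_rqhash_reposition(): re-key by the new end sector. */
	static void rqhash_reposition(struct sketch_request *rq)
	{
		printf("re-hash request, new key %llu\n",
		       rq->pos + rq->nr_sectors);
	}

	/* Shape of the post-merge bookkeeping: scheduler notification omitted,
	 * hash key refreshed if it moved, request remembered as merge hint. */
	static void merged_request(struct sketch_request *rq, enum merge_type type)
	{
		if (type == BACK_MERGE)		/* only a back merge moves the key */
			rqhash_reposition(rq);
		last_merge = rq;
	}

	int main(void)
	{
		struct sketch_request rq = { .pos = 100, .nr_sectors = 8 };

		rq.nr_sectors += 8;		/* bio appended: back merge */
		merged_request(&rq, BACK_MERGE);

		rq.pos -= 8;			/* bio prepended: front merge, */
		rq.nr_sectors += 8;		/* end sector is unchanged      */
		merged_request(&rq, FRONT_MERGE);
		return 0;
	}
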
389 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
394 return e->type->ops.next_request(q, rq);
399 struct request *elv_former_request(struct request_queue *q, struct request *rq)
404 return e->type->ops.former_request(q, rq);
758 struct request *rq)
760 struct rb_node *rbprev = rb_prev(&rq->rb_node);
770 struct request *rq)
772 struct rb_node *rbnext = rb_next(&rq->rb_node);
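
elv_latter_request() and elv_former_request() defer to the scheduler's next_request/former_request hooks when present; the rb-tree helpers in the last two fragments give a scheduler that sorts by start sector its neighbours for free, simply stepping to the adjacent node with rb_prev()/rb_next(). A sketch of the same "adjacent in sector order" idea over a sorted array, since that makes the ordering visible; the helper names are mine, not the kernel's:

	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long long sector_t;

	struct sketch_request {
		sector_t pos;
	};

	/* 'sorted' must be ordered by ->pos, as the elevator's rb-tree is;
	 * 'i' is the position of the request we start from. */
	static struct sketch_request *former(struct sketch_request *sorted, int i)
	{
		return i > 0 ? &sorted[i - 1] : NULL;		/* like rb_prev() */
	}

	static struct sketch_request *latter(struct sketch_request *sorted,
					     int n, int i)
	{
		return i < n - 1 ? &sorted[i + 1] : NULL;	/* like rb_next() */
	}

	int main(void)
	{
		struct sketch_request sorted[] = { { 50 }, { 100 }, { 200 } };
		int n = 3, i = 1;		/* start from the request at 100 */

		printf("former: %llu, latter: %llu\n",
		       former(sorted, i)->pos, latter(sorted, n, i)->pos);
		return 0;
	}
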