Lines Matching defs:fq (block/blk-flush.c)

30  * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
95 struct blk_flush_queue *fq, blk_opf_t flags);
100 return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
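
The fq handle returned on line 100 points at a struct blk_flush_queue, which is defined in block/blk.h rather than in this file, so the query does not show it. A sketch from memory of kernels new enough to carry flush_data_in_flight (v6.4 and later); treat field order and exact types as approximate:

struct blk_flush_queue {
        spinlock_t              mq_flush_lock;          /* guards all flush state below */
        unsigned int            flush_pending_idx:1;    /* list gathering new requests */
        unsigned int            flush_running_idx:1;    /* list served by in-flight flush */
        blk_status_t            rq_status;              /* parked error for shared flush_rq */
        unsigned long           flush_pending_since;    /* jiffies of oldest pending request */
        struct list_head        flush_queue[2];         /* double-buffered pending/running lists */
        unsigned long           flush_data_in_flight;   /* data writes between flush steps */
        struct request          *flush_rq;              /* single pre-allocated flush request */
};
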
153 * @fq: flush queue
161 * spin_lock_irq(fq->mq_flush_lock)
164 struct blk_flush_queue *fq,
168 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
185 fq->flush_pending_since = jiffies;
190 fq->flush_data_in_flight++;
213 blk_kick_flush(q, fq, cmd_flags);
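
Lines 153-213 are from blk_flush_complete_seq(), which records one completed step of a request's flush sequence and triggers the next. The sequence is a bitmask that accumulates finished steps; the next step is always the lowest bit still clear. A self-contained userspace model (the REQ_FSEQ_* values match the enum in blk-flush.c; cur_seq() mirrors blk_flush_cur_seq(), everything else is illustrative):

#include <stdio.h>

/* Values match the REQ_FSEQ_* enum in blk-flush.c. */
enum {
        REQ_FSEQ_PREFLUSH  = 1 << 0,
        REQ_FSEQ_DATA      = 1 << 1,
        REQ_FSEQ_POSTFLUSH = 1 << 2,
        REQ_FSEQ_DONE      = 1 << 3,
        REQ_FSEQ_ACTIONS   = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                             REQ_FSEQ_POSTFLUSH,
};

/* Mirrors blk_flush_cur_seq(): the next step is the lowest bit still
 * clear in the accumulated sequence mask. */
static unsigned int cur_seq(unsigned int seq)
{
        return 1U << __builtin_ctz(~seq);
}

int main(void)
{
        /* A request that needs only PREFLUSH + DATA: blk_insert_flush()
         * pre-marks the unneeded steps done (see line 456). */
        unsigned int policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA;
        unsigned int seq = REQ_FSEQ_ACTIONS & ~policy;

        while (cur_seq(seq) != REQ_FSEQ_DONE) {
                printf("complete step %#x\n", cur_seq(seq));
                seq |= cur_seq(seq);    /* what blk_flush_complete_seq() records */
        }
        printf("flush sequence done\n");
        return 0;
}
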
223 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
226 spin_lock_irqsave(&fq->mq_flush_lock, flags);
229 fq->rq_status = error;
230 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
241 if (fq->rq_status != BLK_STS_OK) {
242 error = fq->rq_status;
243 fq->rq_status = BLK_STS_OK;
253 running = &fq->flush_queue[fq->flush_running_idx];
254 BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
257 fq->flush_running_idx ^= 1;
264 blk_flush_complete_seq(rq, fq, seq, error);
267 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
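
Lines 223-267 are from flush_end_io(). The shared flush_rq is reference counted, and a completion that is not the final reference drop parks its error in fq->rq_status (line 229); the final drop consumes and clears it (lines 241-243), so a failure observed early, e.g. via the timeout path, is not lost. A single-threaded userspace model of that latch (the status values and the refs field are stand-ins; the kernel does all of this under mq_flush_lock):

#include <stdio.h>

#define BLK_STS_OK    0
#define BLK_STS_IOERR 10        /* stand-in value, illustrative only */

struct model_fq {
        int refs;               /* models req_ref_put_and_test() on flush_rq */
        int rq_status;
};

static void model_flush_end_io(struct model_fq *fq, int error)
{
        if (--fq->refs > 0) {
                fq->rq_status = error;  /* parked, as on line 229 */
                return;
        }
        if (fq->rq_status != BLK_STS_OK) {      /* consumed, lines 241-243 */
                error = fq->rq_status;
                fq->rq_status = BLK_STS_OK;
        }
        printf("final completion, error=%d\n", error);
}

int main(void)
{
        struct model_fq fq = { .refs = 2, .rq_status = BLK_STS_OK };

        model_flush_end_io(&fq, BLK_STS_IOERR); /* early failure, latched */
        model_flush_end_io(&fq, BLK_STS_OK);    /* prints error=10 */
        return 0;
}
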
279 * @fq: flush queue
286 * spin_lock_irq(fq->mq_flush_lock)
289 static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
292 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
295 struct request *flush_rq = fq->flush_rq;
298 if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
302 if (fq->flush_data_in_flight &&
304 fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
311 fq->flush_pending_idx ^= 1;
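
Lines 279-311 are from blk_kick_flush(). The two flush_queue[] lists are double-buffered: requests gather on the pending list, and toggling flush_pending_idx (line 311) freezes that batch, marks a flush in flight (pending != running), and lets new arrivals collect on the other list. The check on line 298 is condition C1 from the comment block at the top of the file; lines 302-304 are C2/C3, which hold the flush back while data writes are in flight so more requests can batch into it, bounded by FLUSH_PENDING_TIMEOUT (5 * HZ upstream, if memory serves). A userspace model of both mechanisms:

#include <stdio.h>

#define FLUSH_PENDING_TIMEOUT_MS 5000   /* 5 * HZ upstream, hedged */

struct model_fq {
        unsigned int pending_idx:1;     /* gathers newly arriving requests */
        unsigned int running_idx:1;     /* batch served by the in-flight flush */
        int queued[2];                  /* stand-in for the flush_queue[] lists */
        unsigned long data_in_flight;   /* fq->flush_data_in_flight */
        unsigned long pending_since_ms; /* fq->flush_pending_since */
};

/* C1 (line 298): only issue when no flush is in flight and something
 * is pending.  C2/C3 (lines 302-304): while data writes are in flight,
 * defer to batch more requests, but never past the timeout. */
static void kick(struct model_fq *fq, unsigned long now_ms)
{
        if (fq->pending_idx != fq->running_idx || !fq->queued[fq->pending_idx])
                return;
        if (fq->data_in_flight &&
            now_ms < fq->pending_since_ms + FLUSH_PENDING_TIMEOUT_MS)
                return;
        fq->pending_idx ^= 1;   /* line 311: pending != running means in flight */
        printf("flush issued for %d request(s)\n", fq->queued[fq->running_idx]);
}

/* flush_end_io() (line 257): retire the served batch, then let requests
 * that arrived meanwhile trigger the next flush. */
static void end_io(struct model_fq *fq, unsigned long now_ms)
{
        fq->queued[fq->running_idx] = 0;
        fq->running_idx ^= 1;
        kick(fq, now_ms);
}

int main(void)
{
        struct model_fq fq = { 0 };

        fq.queued[fq.pending_idx]++;    /* first pre-flush arrives */
        kick(&fq, 0);                   /* issued immediately */
        fq.queued[fq.pending_idx]++;    /* another arrives meanwhile */
        kick(&fq, 1);                   /* no-op: C1 fails, flush in flight */
        end_io(&fq, 2);                 /* retires batch 1, issues batch 2 */
        end_io(&fq, 3);                 /* retires batch 2 */
        return 0;
}
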
358 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
369 spin_lock_irqsave(&fq->mq_flush_lock, flags);
370 fq->flush_data_in_flight--;
376 blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
377 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
401 struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
445 spin_lock_irq(&fq->mq_flush_lock);
446 fq->flush_data_in_flight++;
447 spin_unlock_irq(&fq->mq_flush_lock);
455 spin_lock_irq(&fq->mq_flush_lock);
456 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
457 spin_unlock_irq(&fq->mq_flush_lock);
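
Lines 401-457 are from blk_insert_flush(), which decides up front which sequence steps a request needs: a request carrying only data (lines 445-447) merely bumps flush_data_in_flight and is dispatched normally, while anything else enters the state machine with its unneeded steps pre-completed (line 456, REQ_FSEQ_ACTIONS & ~policy). The policy itself comes from blk_flush_policy(), which this query does not match; a userspace model of its logic, reconstructed from memory:

#include <stdio.h>
#include <stdbool.h>

enum { FSEQ_PREFLUSH = 1 << 0, FSEQ_DATA = 1 << 1, FSEQ_POSTFLUSH = 1 << 2 };

/* Which steps a REQ_PREFLUSH/REQ_FUA request needs depends on what the
 * device advertises: a volatile write cache and native FUA support. */
static unsigned int flush_policy(bool has_wc, bool has_fua,
                                 bool preflush, bool fua, bool has_data)
{
        unsigned int policy = 0;

        if (has_data)
                policy |= FSEQ_DATA;
        if (has_wc) {                   /* no write cache: flushes are no-ops */
                if (preflush)
                        policy |= FSEQ_PREFLUSH;
                if (fua && !has_fua)    /* emulate FUA with a post-flush */
                        policy |= FSEQ_POSTFLUSH;
        }
        return policy;
}

int main(void)
{
        /* FUA write on a write-back device without native FUA:
         * needs PREFLUSH + DATA + POSTFLUSH. */
        printf("%#x\n", flush_policy(true, false, true, true, true));
        /* Same request with native FUA: the post-flush is dropped. */
        printf("%#x\n", flush_policy(true, true, true, true, true));
        return 0;
}
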
481 struct blk_flush_queue *fq;
484 fq = kzalloc_node(sizeof(*fq), flags, node);
485 if (!fq)
488 spin_lock_init(&fq->mq_flush_lock);
491 fq->flush_rq = kzalloc_node(rq_sz, flags, node);
492 if (!fq->flush_rq)
495 INIT_LIST_HEAD(&fq->flush_queue[0]);
496 INIT_LIST_HEAD(&fq->flush_queue[1]);
498 return fq;
501 kfree(fq);
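
Joining the fragments on lines 481-501, blk_alloc_flush_queue() reads approximately as below. The goto labels and the cmd_size rounding are filled in from memory of the upstream source, so treat this as a paraphrase rather than verbatim code:

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        /* flush_rq gets room for the driver's per-command payload */
        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}
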
506 void blk_free_flush_queue(struct blk_flush_queue *fq)
509 if (!fq)
512 kfree(fq->flush_rq);
513 kfree(fq);
517 * Allow driver to set its own lock class to fq->mq_flush_lock for
524 * fq->mq_flush_lock for avoiding the lockdep warning.
532 * an hour is taken during SCSI MQ probe with per-fq lock class.
537 lockdep_set_class(&hctx->fq->mq_flush_lock, key);
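
Lines 517-537 cover blk_mq_hctx_set_fq_lock_class(), exported for drivers whose completion path re-enters flush_end_io() (the in-file comment names nvme-loop) and which would otherwise trip lockdep's recursive-locking check, since every fq shares one lock class by default; line 532 records why per-fq classes were rejected outright: probe-time cost. A hypothetical driver-side sketch (the foo_* names are invented; nvme-loop calls this from its ->init_hctx() callback, if memory serves):

#include <linux/blk-mq.h>

/* One static key gives this driver's flush queues their own lockdep
 * class, distinct from the default shared by every other fq. */
static struct lock_class_key foo_fq_lock_key;

static int foo_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                         unsigned int hctx_idx)
{
        /* Nested mq_flush_lock acquisition in our completion path now
         * belongs to a different class, so lockdep stays quiet. */
        blk_mq_hctx_set_fq_lock_class(hctx, &foo_fq_lock_key);
        return 0;
}
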