Lines Matching refs:bfqd

227 	if (!(bfqq == bfqg->bfqd->in_service_queue))
309 bfqq->bfqd->root_group;
533 struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
545 bfqg->bfqd = bfqd;
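
The hits at 533 and 545 come from the blkcg policy-data init hook: when a new group is instantiated, it is wired back to the scheduler instance owned by its request queue. A minimal kernel-context sketch of that hook; the function name bfq_pd_init and the elided initialization are assumptions based on surrounding code, only the two listed lines are verbatim:

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;

	/* ... entity weight and stats initialization elided ... */

	bfqg->bfqd = bfqd;	/* hit at 545: group points back to its device */
}
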
576 static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
590 if (curr_bfqg != bfqd->root_group) {
593 parent = bfqd->root_group;
599 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
617 &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
618 return bfqd->root_group;
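
Hits 576-618 are the cgroup resolution on the bio path. bfq_link_bfqg() (576-593) walks a group's ancestors and hooks any not-yet-linked ones into BFQ's private hierarchy, using bfqd->root_group as the parent of last resort. bfq_bio_bfqg() (599-618) maps a bio to its group, and when nothing usable is found it re-associates the bio with the root group's css and returns the root group. A condensed sketch of the latter; the parent walk and the online check are assumptions, the fallback lines come from the listing:

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	/* Walk toward the root until an online group is found. */
	while (blkg) {
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg && bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}

	/* No usable group: fall back to the root group (hits 617-618). */
	bio_associate_blkg_from_css(bio,
			&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}
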
623 * @bfqd: queue descriptor.
635 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
653 if (bfqq == &bfqd->oom_bfqq)
672 if (bfqq == bfqd->in_service_queue)
673 bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
677 bfq_deactivate_bfqq(bfqd, bfqq, false, false);
685 else if (bfqd->last_bfqq_created == bfqq)
686 bfqd->last_bfqq_created = NULL;
697 if (unlikely(!bfqd->nonrot_with_queueing))
698 bfq_pos_tree_add_move(bfqd, bfqq);
699 bfq_activate_bfqq(bfqd, bfqq);
702 if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
703 bfq_schedule_dispatch(bfqd);
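
Hits 623-703 trace bfq_bfqq_move(), the core of queue migration: the queue is taken out of service and deactivated, re-parented, then reactivated in the destination group, and the device is kicked if it went idle meanwhile. A condensed sketch; the busy/idle distinction on (de)activation and the actual entity re-parenting are simplified or elided, and the expiration reason on line 673's continuation is filled in as an assumption:

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	/* The fallback OOM queue never migrates (hit at 653). */
	if (bfqq == &bfqd->oom_bfqq)
		return;

	/* Stop serving bfqq before detaching it (hits 672-677), so the
	 * scheduler never dispatches from a queue whose entity is
	 * being re-parented. */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);
	bfq_deactivate_bfqq(bfqd, bfqq, false, false);

	/* Forget a creation hint tied to the old group (hits 685-686). */
	if (bfqd->last_bfqq_created == bfqq)
		bfqd->last_bfqq_created = NULL;

	/* ... re-parent bfqq->entity under bfqg (elided) ... */

	/* Re-insert the queue and restart dispatching if the device
	 * has gone idle in the meantime (hits 697-703). */
	if (unlikely(!bfqd->nonrot_with_queueing))
		bfq_pos_tree_add_move(bfqd, bfqq);
	bfq_activate_bfqq(bfqd, bfqq);
	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}
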
708 static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
719 bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
744 bfq_release_process_ref(bfqd, sync_bfqq);
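
Hits 708-744 are bfq_sync_bfqq_move(): a process's sync queue follows it into the new group unless the queue is shared by a merge chain, in which case the process detaches from the shared queue rather than dragging it across groups. A simplified sketch; the real function also walks the merge chain to check whether it already sits in the destination group, which is elided here:

static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{
	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
		/* Sole owner: migrate the queue itself (hit at 719). */
		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	} else {
		/* Shared with a merge chain: detach this process and
		 * drop its reference instead (hit at 744). */
		bic_set_bfqq(bic, NULL, true, act_idx);
		bfq_release_process_ref(bfqd, sync_bfqq);
	}
}
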
750 * @bfqd: the queue descriptor.
754 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
758 static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
764 for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
771 bfq_release_process_ref(bfqd, async_bfqq);
775 bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
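
Hits 750-775 show __bfq_bic_change_cgroup() iterating over the device's actuators: each actuator may have one async and one sync queue attached to the bic. Async queues are shared per group, so the bic simply drops its reference and a queue in the new group is created on demand; sync queues are migrated. A sketch reconstructed around the listed lines; the bic_to_bfqq()/bic_set_bfqq() plumbing is an assumption based on the surrounding kernel code:

static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	unsigned int act_idx;

	/* One async and one sync queue may exist per actuator (hit at 764). */
	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		/* Async queues: just drop the old-group reference
		 * (hit at 771). */
		if (async_bfqq &&
		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}

		/* Sync queues: migrate them (hit at 775). */
		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
	}
}
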
781 struct bfq_data *bfqd = bic_to_bfqd(bic);
782 struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
791 if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
798 bfq_link_bfqg(bfqd, bfqg);
799 __bfq_bic_change_cgroup(bfqd, bic, bfqg);
806 * (bfqd->lock). This exposes BFQ to the following sort of
820 * blkg. And these hooks are executed with bfqd->lock held for
825 * are holding bfqd->lock. A blkg_lookup performed with
826 * bfqd->lock held then returns a fully consistent blkg, which
831 * assignment, and (2) bfqd->lock is being held, here we can
835 * release bfqd->lock, even just getting blkg through this
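
Hits 781-835 belong to bfq_bic_update_cgroup(): a per-bic serial number makes cgroup changes cheap to detect, and the long comment at 806-835 argues that BFQ need not hold a blkg reference, because every hook that looks a blkg up runs under bfqd->lock and never uses the blkg after releasing that lock. A condensed sketch of the fast path; the serial_nr bookkeeping around the listed lines is partly an assumption:

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/* Fast path: the process is still in the cgroup it was in
	 * last time (hit at 791). */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/* New cgroup: hook it into BFQ's private hierarchy and
	 * migrate the bic's queues (hits 798-799). */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	bic->blkcg_serial_nr = serial_nr;
}
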
868 * @bfqd: the device data structure with the root group.
873 static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
893 bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
898 * @bfqd: the device data structure with the root group.
903 static void bfq_reparent_active_queues(struct bfq_data *bfqd,
912 bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);
915 bfq_reparent_leaf_entity(bfqd,
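
Hits 868-915 cover the reparenting path used when a group goes away: bfq_reparent_active_queues() drains one service tree by repeatedly reparenting its leftmost active entity, and bfq_reparent_leaf_entity() descends from a group entity to an actual queue, then bottoms out in bfq_bfqq_move(bfqd, bfqq, bfqd->root_group) (hit at 893). A sketch of the outer function, reconstructed around the listed lines; the rb-tree access is an assumption:

static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	/* Reparent the leftmost active entity until the tree is empty
	 * (hit at 912). */
	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	/* The in-service entity is not on the active tree and must be
	 * reparented separately (hit at 915). */
	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}
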
932 struct bfq_data *bfqd = bfqg->bfqd;
937 spin_lock_irqsave(&bfqd->lock, flags);
961 bfq_reparent_active_queues(bfqd, bfqg, st, i);
980 bfq_put_async_queues(bfqd, bfqg);
982 spin_unlock_irqrestore(&bfqd->lock, flags);
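
Hits 932-982 are from the policy-data offline hook: under bfqd->lock, every queue still scheduled in the dying group is reparented, one I/O-priority class at a time, and the group's cached async queues are released. A condensed sketch; entity checks and stats teardown are elided, and the hook name bfq_pd_offline is inferred from context:

static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;	/* hit at 932 */
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);	/* hit at 937 */

	/* Drain each per-class service tree (hit at 961). */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfq_reparent_active_queues(bfqd, bfqg,
					   bfqg->sched_data.service_tree + i,
					   i);

	/* Drop the group's cached async queues (hit at 980). */
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);	/* hit at 982 */
}
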
992 void bfq_end_wr_async(struct bfq_data *bfqd)
996 list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
999 bfq_end_wr_async_queues(bfqd, bfqg);
1001 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
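
Hits 992-1001 are nearly the whole of bfq_end_wr_async(): end weight-raising for the async queues of every group attached to the device's queue, then for the root group. Reconstructed from the listed lines (only the blkg local declaration is filled in):

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	/* Every group on this queue, then the root group
	 * (hits 996-1001). */
	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
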
1284 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
1288 ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
1292 return blkg_to_bfqg(bfqd->queue->root_blkg);
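
Hits 1284-1292 recover almost all of bfq_create_group_hierarchy() in the group-scheduling build: activating the BFQ blkcg policy allocates policy data (and thus a bfq_group) for every blkg on the disk, and the root blkg's group becomes bfqd->root_group. A sketch with only the error check filled in as an assumption:

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	/* Allocates a bfq_group for every existing blkg (hit at 1288). */
	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	/* The root blkg's policy data is BFQ's root group. */
	return blkg_to_bfqg(bfqd->queue->root_blkg);
}
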
1447 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1465 void bfq_end_wr_async(struct bfq_data *bfqd)
1467 bfq_end_wr_async_queues(bfqd, bfqd->root_group);
1470 struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
1472 return bfqd->root_group;
1477 return bfqq->bfqd->root_group;
1482 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
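
The hits from 1447 onward come from the stubs compiled when group scheduling is disabled (!CONFIG_BFQ_GROUP_IOSCHED): everything collapses onto bfqd->root_group. Reconstructed from the listed lines; the empty bfq_bfqq_move() body and the enclosing function name bfqq_group for the hit at 1477 are assumptions:

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	/* Single-group build: nothing to move (assumed no-op). */
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);	/* hit at 1467 */
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;	/* hit at 1472 */
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;	/* hit at 1477 */
}

/* The stub at 1482 allocates the lone root group; its body is not
 * shown in the listing. */
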