Lines matching refs: cfs_b (kernel/sched/fair.c; the number at the start of each matched line is its line number in fair.c)

5594  * requires cfs_b->lock
5596 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
5600 if (unlikely(cfs_b->quota == RUNTIME_INF))
5603 cfs_b->runtime += cfs_b->quota;
5604 runtime = cfs_b->runtime_snap - cfs_b->runtime;
5606 cfs_b->burst_time += runtime;
5607 cfs_b->nr_burst++;
5610 cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
5611 cfs_b->runtime_snap = cfs_b->runtime;
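
The matched lines 5596-5611 together form the body of __refill_cfs_bandwidth_runtime(): add one period's quota to the pool, note how far the previous period dipped into burst credit, then clamp the pool to quota + burst and snapshot it for the next period's comparison. Below is a minimal, self-contained sketch of that arithmetic; the struct and names are cut-down stand-ins for illustration (and locking is omitted), not the kernel's struct cfs_bandwidth.

    #include <stdint.h>

    struct bw_pool {                 /* cut-down stand-in for struct cfs_bandwidth */
        uint64_t quota;              /* runtime granted per period */
        uint64_t burst;              /* extra credit allowed on top of quota */
        uint64_t runtime;            /* runtime currently available in the pool */
        uint64_t runtime_snap;       /* pool level right after the previous refill */
        uint64_t burst_time;         /* total time served out of burst credit */
        unsigned int nr_burst;       /* periods in which burst credit was used */
    };

    static void refill_pool(struct bw_pool *b)
    {
        int64_t used_burst;

        b->runtime += b->quota;

        /* positive exactly when the last period consumed more than one quota */
        used_burst = (int64_t)(b->runtime_snap - b->runtime);
        if (used_burst > 0) {
            b->burst_time += (uint64_t)used_burst;
            b->nr_burst++;
        }

        /* never carry more than one quota plus the burst credit */
        if (b->runtime > b->quota + b->burst)
            b->runtime = b->quota + b->burst;
        b->runtime_snap = b->runtime;
    }
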
5620 static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b,
5625 lockdep_assert_held(&cfs_b->lock);
5630 if (cfs_b->quota == RUNTIME_INF)
5633 start_cfs_bandwidth(cfs_b);
5635 if (cfs_b->runtime > 0) {
5636 amount = min(cfs_b->runtime, min_amount);
5637 cfs_b->runtime -= amount;
5638 cfs_b->idle = 0;
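
Lines 5620-5638 are the core of __assign_cfs_rq_runtime(): a per-CPU cfs_rq whose runtime_remaining has dropped to zero or below asks the group pool for enough runtime to get back to a positive target (normally one bandwidth slice; 1 ns in the throttle path). With quota unlimited the request is granted in full; otherwise it is capped by what the pool still holds, and the pool is marked non-idle. A self-contained sketch of that exchange, with illustrative names and the locking and start_cfs_bandwidth() call omitted:

    #include <stdbool.h>
    #include <stdint.h>

    struct group_pool { bool unlimited; uint64_t runtime; int idle; };

    /*
     * Top up the local runtime_remaining to "target" nanoseconds from the
     * group pool.  Returns true when the cfs_rq ends up with positive
     * runtime and may therefore keep running.
     */
    static bool assign_runtime(struct group_pool *p, int64_t *runtime_remaining,
                               uint64_t target)
    {
        /* runtime_remaining is <= 0 here, so unsigned wrap-around yields
         * the positive amount needed to reach the target */
        uint64_t want = target - (uint64_t)*runtime_remaining;
        uint64_t amount = 0;

        if (p->unlimited) {
            amount = want;
        } else if (p->runtime > 0) {
            amount = p->runtime < want ? p->runtime : want;
            p->runtime -= amount;
            p->idle = 0;            /* the pool saw activity this period */
        }

        *runtime_remaining += (int64_t)amount;
        return *runtime_remaining > 0;
    }
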
5650 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5653 raw_spin_lock(&cfs_b->lock);
5654 ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice());
5655 raw_spin_unlock(&cfs_b->lock);
5766 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5770 raw_spin_lock(&cfs_b->lock);
5772 if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) {
5779 * for 1ns of runtime rather than just check cfs_b.
5784 &cfs_b->throttled_cfs_rq);
5786 raw_spin_unlock(&cfs_b->lock);
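
Lines 5766-5786 come from the throttle path. Just before a cfs_rq is throttled it makes one more __assign_cfs_rq_runtime(cfs_b, cfs_rq, 1) call, asking for a single nanosecond: if even that succeeds, bandwidth became available while the throttle decision was being made and the throttle is abandoned; if it fails, the cfs_rq is appended to cfs_b->throttled_cfs_rq. The comment fragment at line 5779 hints at why it asks for 1 ns rather than just peeking at the pool: granting the nanosecond commits later check_cfs_rq_runtime() calls to the same "don't throttle" answer. A small illustrative sketch of that decision, with hypothetical names and the list handling and locking left out:

    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal stand-in for __assign_cfs_rq_runtime(cfs_b, cfs_rq, 1). */
    static bool try_assign_1ns(uint64_t *pool_runtime, int64_t *runtime_remaining)
    {
        uint64_t want = (uint64_t)(1 - *runtime_remaining);   /* reach +1 ns */
        uint64_t amount = *pool_runtime < want ? *pool_runtime : want;

        *pool_runtime -= amount;
        *runtime_remaining += (int64_t)amount;
        return *runtime_remaining > 0;
    }

    /* True when the cfs_rq should really be put on the throttled list. */
    static bool should_throttle(uint64_t *pool_runtime, int64_t *runtime_remaining)
    {
        return !try_assign_1ns(pool_runtime, runtime_remaining);
    }
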
5855 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
5865 raw_spin_lock(&cfs_b->lock);
5867 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
5871 raw_spin_unlock(&cfs_b->lock);
6015 static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
6026 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
6046 raw_spin_lock(&cfs_b->lock);
6048 if (runtime > cfs_b->runtime)
6049 runtime = cfs_b->runtime;
6050 cfs_b->runtime -= runtime;
6051 remaining = cfs_b->runtime;
6052 raw_spin_unlock(&cfs_b->lock);
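
Lines 6015-6052 are the core of distribute_cfs_runtime(): walk the throttled list and hand each throttled cfs_rq just enough runtime to lift its runtime_remaining to +1 ns, never giving out more than the pool holds; the caller learns whether anything is still throttled when the pool runs dry. A self-contained sketch of that loop (per-entry locking, the RCU list walk and the actual unthrottling are left out):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct throttled_rq {
        int64_t runtime_remaining;     /* <= 0 while throttled */
        struct throttled_rq *next;     /* stand-in for the kernel's list */
    };

    /* Returns true when the pool ran dry with cfs_rqs still throttled. */
    static bool distribute_runtime(uint64_t *pool_runtime, struct throttled_rq *head)
    {
        struct throttled_rq *rq;

        for (rq = head; rq != NULL; rq = rq->next) {
            uint64_t runtime;

            if (*pool_runtime == 0)
                return true;           /* out of runtime; this rq and the rest stay throttled */

            /* just enough to lift runtime_remaining above zero ... */
            runtime = (uint64_t)(-rq->runtime_remaining + 1);
            /* ... but never more than the pool still holds */
            if (runtime > *pool_runtime)
                runtime = *pool_runtime;
            *pool_runtime -= runtime;

            rq->runtime_remaining += (int64_t)runtime;
            /* in the kernel, a cfs_rq that is now positive gets unthrottled here */
        }
        return false;
    }
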
6100 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
6103 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
6108 if (cfs_b->quota == RUNTIME_INF)
6111 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
6112 cfs_b->nr_periods += overrun;
6114 /* Refill extra burst quota even if cfs_b->idle */
6115 __refill_cfs_bandwidth_runtime(cfs_b);
6121 if (cfs_b->idle && !throttled)
6126 cfs_b->idle = 1;
6131 cfs_b->nr_throttled += overrun;
6134 * This check is repeated as we release cfs_b->lock while we unthrottle.
6136 while (throttled && cfs_b->runtime > 0) {
6137 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6138 /* we can't nest cfs_b->lock while distributing bandwidth */
6139 throttled = distribute_cfs_runtime(cfs_b);
6140 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6149 cfs_b->idle = 0;
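
Lines 6100-6149 are the period-timer work in do_sched_cfs_period_timer(): count the overruns, refill the pool (burst credit accrues even across idle periods, line 6114), and decide whether the timer can be parked. If nothing drew runtime last period and nothing is throttled, the timer is reported idle; otherwise runtime is redistributed to throttled cfs_rqs until everything is unthrottled or the pool is empty, with cfs_b->lock dropped around each distribution pass (lines 6136-6140). A condensed, self-contained model of that control flow; the statistics fields are kept, while locking and the real refill/distribute are replaced by toy stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    struct period_state {
        bool     unlimited;        /* quota == RUNTIME_INF */
        bool     idle;             /* nobody drew from the pool last period */
        uint64_t runtime, quota;   /* pool level and per-period refill */
        unsigned int nr_periods, nr_throttled;
    };

    /* Toy stand-ins for __refill_cfs_bandwidth_runtime() / distribute_cfs_runtime(). */
    static void toy_refill(struct period_state *s)     { s->runtime += s->quota; }
    static bool toy_distribute(struct period_state *s) { s->runtime = 0; return false; }

    /* Returns 1 when the period timer may be deactivated, 0 to keep it running. */
    static int period_timer_work(struct period_state *s, int overrun, bool throttled)
    {
        if (s->unlimited)
            return 1;                   /* no bandwidth constraint: no need for the timer */

        s->nr_periods += overrun;
        toy_refill(s);                  /* refill even if idle, so burst credit keeps accruing */

        if (s->idle && !throttled)
            return 1;                   /* nothing ran last period, nothing throttled: park it */

        if (!throttled) {
            s->idle = 1;                /* potentially idle for the upcoming period */
            return 0;
        }

        s->nr_throttled += overrun;
        /* re-checked each pass because the kernel drops cfs_b->lock while distributing */
        while (throttled && s->runtime > 0)
            throttled = toy_distribute(s);

        s->idle = 0;                    /* stay active while anything was throttled */
        return 0;
    }
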
6167 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
6171 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
6173 struct hrtimer *refresh_timer = &cfs_b->period_timer;
6188 static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
6193 if (runtime_refresh_within(cfs_b, min_left))
6197 if (cfs_b->slack_started)
6199 cfs_b->slack_started = true;
6201 hrtimer_start(&cfs_b->slack_timer,
6209 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
6215 raw_spin_lock(&cfs_b->lock);
6216 if (cfs_b->quota != RUNTIME_INF) {
6217 cfs_b->runtime += slack_runtime;
6220 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
6221 !list_empty(&cfs_b->throttled_cfs_rq))
6222 start_cfs_slack_bandwidth(cfs_b);
6224 raw_spin_unlock(&cfs_b->lock);
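
Lines 6209-6224 come from __return_cfs_rq_runtime(): when a cfs_rq goes idle it keeps only a small local reserve (min_cfs_rq_runtime) and returns the rest of its runtime_remaining to the group pool; if the pool then holds more than one bandwidth slice and something is throttled, the slack timer is armed so the surplus can be redistributed before the next period boundary. A self-contained sketch; the reserve and slice values below are illustrative stand-ins, and the actual timer arming is reduced to a flag:

    #include <stdbool.h>
    #include <stdint.h>

    #define LOCAL_RESERVE_NS ((int64_t)1 * 1000 * 1000)    /* stand-in for min_cfs_rq_runtime (1 ms here) */
    #define SLICE_NS         ((uint64_t)5 * 1000 * 1000)   /* stand-in for sched_cfs_bandwidth_slice() (5 ms here) */

    struct slack_pool {
        bool     unlimited;           /* quota == RUNTIME_INF */
        uint64_t runtime;             /* global pool */
        bool     has_throttled;       /* !list_empty(&throttled_cfs_rq) */
        bool     slack_timer_armed;   /* start_cfs_slack_bandwidth() in the kernel */
    };

    static void return_slack(struct slack_pool *p, int64_t *runtime_remaining)
    {
        int64_t slack = *runtime_remaining - LOCAL_RESERVE_NS;

        if (slack <= 0)
            return;                   /* keep the small local reserve, nothing to give back */

        if (!p->unlimited) {
            p->runtime += (uint64_t)slack;
            /* enough for a full slice and someone is waiting: defer an unthrottle */
            if (p->runtime > SLICE_NS && p->has_throttled)
                p->slack_timer_armed = true;
        }

        /* drop it locally even if the pool did not take it (quota unlimited) */
        *runtime_remaining -= slack;
    }
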
6245 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
6251 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6252 cfs_b->slack_started = false;
6254 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
6255 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6259 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
6260 runtime = cfs_b->runtime;
6262 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
6267 distribute_cfs_runtime(cfs_b);
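
Lines 6245-6267 are do_sched_cfs_slack_timer(), the deferred-unthrottle side of the slack machinery, and the guard it relies on is runtime_refresh_within() / start_cfs_slack_bandwidth() at lines 6171-6201: clear slack_started, bail out if the period timer is about to refill the pool anyway, and only call distribute_cfs_runtime() when the pool holds more than one slice. A compact sketch of just that decision, with hypothetical names and the timer and locking details dropped:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True when the deferred unthrottle should actually redistribute runtime.
     * period_timer_remaining_ns models what the kernel reads via
     * hrtimer_expires_remaining(&cfs_b->period_timer).
     */
    static bool slack_timer_should_distribute(int64_t period_timer_remaining_ns,
                                              uint64_t min_expire_ns,
                                              bool quota_unlimited,
                                              uint64_t pool_runtime_ns,
                                              uint64_t slice_ns)
    {
        /* a quota refresh is imminent (or running): let the period timer do the work */
        if (period_timer_remaining_ns < (int64_t)min_expire_ns)
            return false;

        /* only worth it when more than one full slice is sitting in the pool */
        return !quota_unlimited && pool_runtime_ns > slice_ns;
    }
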
6332 struct cfs_bandwidth *cfs_b =
6335 do_sched_cfs_slack_timer(cfs_b);
6344 struct cfs_bandwidth *cfs_b =
6351 raw_spin_lock_irqsave(&cfs_b->lock, flags);
6353 overrun = hrtimer_forward_now(timer, cfs_b->period);
6357 idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
6360 u64 new, old = ktime_to_ns(cfs_b->period);
6369 cfs_b->period = ns_to_ktime(new);
6370 cfs_b->quota *= 2;
6371 cfs_b->burst *= 2;
6377 div_u64(cfs_b->quota, NSEC_PER_USEC));
6383 div_u64(cfs_b->quota, NSEC_PER_USEC));
6391 cfs_b->period_active = 0;
6392 raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
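
Lines 6344-6392 are from the hrtimer callback sched_cfs_period_timer(): it loops on hrtimer_forward_now() to account every missed period, runs do_sched_cfs_period_timer() for each batch of overruns, and, when it keeps falling behind, doubles period, quota and burst together (lines 6369-6371) so the configured quota/period ratio stays the same while the timer fires half as often; finally it clears period_active when the timer goes idle. A small sketch of the scaling step alone; the cap value stands in for max_cfs_quota_period and the printf for the ratelimited kernel warning:

    #include <stdint.h>
    #include <stdio.h>

    #define PERIOD_CAP_NS ((uint64_t)1 * 1000 * 1000 * 1000)  /* stand-in for max_cfs_quota_period (~1 s) */

    struct bw_cfg { uint64_t period_ns, quota_ns, burst_ns; };

    /* Double period, quota and burst together so the bandwidth ratio is unchanged. */
    static void scale_up_period(struct bw_cfg *c)
    {
        uint64_t new_period = c->period_ns * 2;

        if (new_period < PERIOD_CAP_NS) {
            c->period_ns = new_period;
            c->quota_ns *= 2;
            c->burst_ns *= 2;
            printf("cfs period too short, scaling up (period = %llu us, quota = %llu us)\n",
                   (unsigned long long)(c->period_ns / 1000),
                   (unsigned long long)(c->quota_ns / 1000));
        } else {
            printf("cfs period too short, but cannot scale up without losing precision\n");
        }
    }
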
6397 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent)
6399 raw_spin_lock_init(&cfs_b->lock);
6400 cfs_b->runtime = 0;
6401 cfs_b->quota = RUNTIME_INF;
6402 cfs_b->period = ns_to_ktime(default_cfs_period());
6403 cfs_b->burst = 0;
6404 cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF;
6406 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
6407 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
6408 cfs_b->period_timer.function = sched_cfs_period_timer;
6411 hrtimer_set_expires(&cfs_b->period_timer,
6412 get_random_u32_below(cfs_b->period));
6413 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6414 cfs_b->slack_timer.function = sched_cfs_slack_timer;
6415 cfs_b->slack_started = false;
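
Lines 6397-6415 are init_cfs_bandwidth(): a freshly created group starts unconstrained (quota = RUNTIME_INF, burst = 0, empty pool), inherits hierarchical_quota from its parent, gets its throttled list and both hrtimers initialised, and has its period timer offset by a random amount (line 6412) so the period timers of many groups do not all expire in lockstep. A sketch of just the default values; the 100 ms period is assumed to match default_cfs_period(), and the timer setup and random offset are omitted:

    #include <stdbool.h>
    #include <stdint.h>

    #define RUNTIME_UNLIMITED  UINT64_MAX                     /* stand-in for RUNTIME_INF */
    #define DEFAULT_PERIOD_NS  ((uint64_t)100 * 1000 * 1000)  /* 100 ms default period */

    struct bw_init {
        uint64_t runtime, quota, period, burst, hierarchical_quota;
        bool     slack_started, period_active;
    };

    static struct bw_init bw_init_defaults(const struct bw_init *parent)
    {
        struct bw_init b = {
            .runtime            = 0,                  /* pool starts empty */
            .quota              = RUNTIME_UNLIMITED,  /* no bandwidth limit by default */
            .period             = DEFAULT_PERIOD_NS,
            .burst              = 0,                  /* no burst credit until configured */
            .hierarchical_quota = parent ? parent->hierarchical_quota
                                         : RUNTIME_UNLIMITED,
            .slack_started      = false,
            .period_active      = false,
        };
        return b;
    }
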
6425 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6427 lockdep_assert_held(&cfs_b->lock);
6429 if (cfs_b->period_active)
6432 cfs_b->period_active = 1;
6433 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
6434 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
6437 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
6442 if (!cfs_b->throttled_cfs_rq.next)
6445 hrtimer_cancel(&cfs_b->period_timer);
6446 hrtimer_cancel(&cfs_b->slack_timer);
6489 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
6492 raw_spin_lock(&cfs_b->lock);
6493 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
6494 raw_spin_unlock(&cfs_b->lock);
6609 void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent) {}
6617 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}