Lines matching refs:dl_bw (SCHED_DEADLINE bandwidth accounting; each entry is a source line number followed by the matching line)

115 static inline struct dl_bw *dl_bw_of(int i)
119 return &cpu_rq(i)->rd->dl_bw;
181 void __dl_update(struct dl_bw *dl_b, s64 bw)
183 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
195 static inline struct dl_bw *dl_bw_of(int i)
197 return &cpu_rq(i)->dl.dl_bw;
216 void __dl_update(struct dl_bw *dl_b, s64 bw)
218 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
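
The two dl_bw_of()/__dl_update() pairs above are the SMP and UP builds of the same accessors: with CONFIG_SMP the struct dl_bw is embedded in the root_domain shared by all CPUs of the domain (and __dl_update() walks every runqueue in the span), otherwise it is embedded in the per-CPU dl_rq. Either way __dl_update() recovers the container via container_of(). A minimal compilable model of the UP variant, with stripped-down stand-in structs, showing only the embedding arithmetic:

    #include <stddef.h>

    /* Stand-ins for the kernel structs; only the embedding matters. */
    struct dl_bw { long long bw; unsigned long long total_bw; };
    struct dl_rq { long long extra_bw; struct dl_bw dl_bw; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* UP __dl_update(): walk back from the embedded dl_bw to the
     * enclosing dl_rq and hand the freed bandwidth to extra_bw. */
    static void __dl_update(struct dl_bw *dl_b, long long bw)
    {
            struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);

            dl->extra_bw += bw;
    }
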
225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
239 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
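
__dl_add() and __dl_sub() charge or refund a task's bandwidth against the domain-wide total_bw and hand the per-CPU share to __dl_update(); __dl_overflow() is the admission test every later call site relies on. A compilable userspace model, with names mirroring the kernel but the __dl_update() plumbing dropped (cap_scale() is as in kernel/sched/sched.h, SCHED_CAPACITY_SHIFT = 10):

    #include <stdbool.h>
    #include <stdint.h>

    #define SCHED_CAPACITY_SHIFT    10
    #define cap_scale(v, cap)       (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

    struct dl_bw {
            int64_t  bw;            /* admitted limit, -1 means "no limit" */
            uint64_t total_bw;      /* sum of admitted task bandwidths */
    };

    /* The kernel versions also pass the per-CPU share to __dl_update();
     * that is dropped in this model. */
    static void __dl_add(struct dl_bw *dl_b, uint64_t tsk_bw)
    {
            dl_b->total_bw += tsk_bw;
    }

    static void __dl_sub(struct dl_bw *dl_b, uint64_t tsk_bw)
    {
            dl_b->total_bw -= tsk_bw;
    }

    /* Admission test: would swapping old_bw for new_bw exceed the
     * capacity-scaled limit? */
    static bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                              uint64_t old_bw, uint64_t new_bw)
    {
            return dl_b->bw != -1 &&
                   cap_scale((uint64_t)dl_b->bw, cap) <
                   dl_b->total_bw - old_bw + new_bw;
    }
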
246 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
251 dl_rq->running_bw += dl_bw;
259 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
264 dl_rq->running_bw -= dl_bw;
273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
278 dl_rq->this_bw += dl_bw;
283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
288 dl_rq->this_bw -= dl_bw;
299 __add_rq_bw(dl_se->dl_bw, dl_rq);
306 __sub_rq_bw(dl_se->dl_bw, dl_rq);
313 __add_running_bw(dl_se->dl_bw, dl_rq);
320 __sub_running_bw(dl_se->dl_bw, dl_rq);
346 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
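
The four helpers above maintain two per-runqueue aggregates: this_bw sums the bandwidth of every deadline task queued on the rq, running_bw only that of the non-blocked ones, so running_bw <= this_bw is an invariant. The kernel checks both updates with SCHED_WARN_ON() and clamps to zero on underflow; a small model of that discipline, asserting where the kernel only warns:

    #include <assert.h>
    #include <stdint.h>

    struct dl_rq_model { uint64_t running_bw, this_bw; };

    static void __add_running_bw(uint64_t dl_bw, struct dl_rq_model *dl_rq)
    {
            uint64_t old = dl_rq->running_bw;

            dl_rq->running_bw += dl_bw;
            assert(dl_rq->running_bw >= old);               /* no overflow */
            assert(dl_rq->running_bw <= dl_rq->this_bw);    /* invariant */
    }

    static void __sub_running_bw(uint64_t dl_bw, struct dl_rq_model *dl_rq)
    {
            uint64_t old = dl_rq->running_bw;

            dl_rq->running_bw -= dl_bw;
            if (dl_rq->running_bw > old)    /* wrapped around: underflow */
                    dl_rq->running_bw = 0;  /* clamp, as the kernel does */
    }
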
449 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
454 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
516 void init_dl_bw(struct dl_bw *dl_b)
537 init_dl_bw(&dl_rq->dl_bw);
664 struct dl_bw *dl_b;
715 dl_b = &rq->rd->dl_bw;
717 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
720 dl_b = &later_rq->rd->dl_bw;
722 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
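
The sub/add pair at 715-722 transfers a task's bandwidth between root domains when it has to migrate away (dl_task_offline_migration() in current kernels): refund to the source rd's dl_bw, charge to the destination's, each scaled by the width of that domain's span. A sketch of that shape, with the surrounding migration logic elided (the lock field of struct dl_bw is visible in the rd->dl_bw.lock matches further down):

    raw_spin_lock(&rq->rd->dl_bw.lock);
    __dl_sub(&rq->rd->dl_bw, p->dl.dl_bw, cpumask_weight(rq->rd->span));
    raw_spin_unlock(&rq->rd->dl_bw.lock);

    raw_spin_lock(&later_rq->rd->dl_bw.lock);
    __dl_add(&later_rq->rd->dl_bw, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
    raw_spin_unlock(&later_rq->rd->dl_bw.lock);
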
1296 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
1313 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1314 u_act = dl_se->dl_bw;
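
The lines around 1296-1314 are grub_reclaim(), GRUB's scaling of consumed runtime by Uact/Umax: the active utilization is clamped below by the entity's own bandwidth, then multiplied by bw_ratio = (1/Umax) << RATIO_SHIFT. A compilable model of the arithmetic (stand-in struct; BW_SHIFT = 20 and RATIO_SHIFT = 8 as in current kernels):

    #include <stdint.h>

    #define BW_SHIFT        20
    #define RATIO_SHIFT     8

    /* Stand-in for the dl_rq fields grub_reclaim() reads. */
    struct grub_rq_model {
            uint64_t this_bw;       /* Utot: all deadline tasks on the rq */
            uint64_t running_bw;    /* Uact: the non-blocked ones */
            uint64_t extra_bw;      /* bandwidth GRUB may not reclaim */
            uint64_t max_bw;        /* Umax in BW_SHIFT fixed point */
            uint64_t bw_ratio;      /* (1 / Umax) in RATIO_SHIFT fixed point */
    };

    /* Scale the consumed runtime 'delta' by Uact/Umax, where Uact is
     * clamped below by the entity's own bandwidth u. */
    static uint64_t grub_reclaim(uint64_t delta, struct grub_rq_model *dl,
                                 uint64_t u)
    {
            uint64_t u_inact = dl->this_bw - dl->running_bw;
            uint64_t u_act;

            /* Compared this way round so the subtraction cannot underflow. */
            if (u_inact + dl->extra_bw > dl->max_bw - u)
                    u_act = u;
            else
                    u_act = dl->max_bw - u_inact - dl->extra_bw;

            u_act = (u_act * dl->bw_ratio) >> RATIO_SHIFT;
            return (delta * u_act) >> BW_SHIFT;
    }
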
1487 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1496 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2592 struct dl_bw *src_dl_b;
2601 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2642 struct dl_bw *dl_b;
2652 dl_b = &rq->rd->dl_bw;
2655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2666 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2667 rd->dl_bw.total_bw = 0;
2668 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
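
Lines 2642-2668 are the root-domain rebuild path after the scheduling domains change: the lock/total_bw = 0/unlock triple (dl_clear_root_domain() in current kernels) resets the aggregate, and each deadline task is then re-charged to its new root domain. A sketch of the re-charge side, with the kernel's pi_lock/rq-lock preamble and the non-deadline-task bailout elided:

    void dl_add_task_root_domain(struct task_struct *p)
    {
            struct rq *rq = task_rq(p);
            struct dl_bw *dl_b = &rq->rd->dl_bw;

            raw_spin_lock(&dl_b->lock);
            __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
            raw_spin_unlock(&dl_b->lock);
    }
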
2851 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2860 struct dl_bw *dl_b;
2910 struct dl_bw *dl_b;
2951 struct dl_bw *dl_b = dl_bw_of(cpu);
2958 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2973 __dl_sub(dl_b, p->dl.dl_bw, cpus);
2977 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
2985 __dl_sub(dl_b, p->dl.dl_bw, cpus);
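
Lines 2951-2985 are the sched_setattr() admission path: under dl_b->lock, the old reservation is swapped for the new one only if __dl_overflow() says the result still fits. The parameter-change branch, schematically (the branches for entering and leaving SCHED_DEADLINE are elided):

    if (task_has_dl_policy(p) &&
        !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
            __dl_sub(dl_b, p->dl.dl_bw, cpus);      /* drop old bandwidth */
            __dl_add(dl_b, new_bw, cpus);           /* charge new bandwidth */
            err = 0;
    }
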
3018 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3098 dl_se->dl_bw = 0;
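
A task's dl_bw itself is just runtime/period in fixed point: __setparam_dl() computes it with to_ratio() and __dl_clear_params() zeroes it again. A self-contained demo of the arithmetic (simplified: the kernel's to_ratio() also special-cases RUNTIME_INF and divides with div64_u64()):

    #include <stdio.h>
    #include <stdint.h>

    #define BW_SHIFT        20

    /* Userspace copy of the kernel's to_ratio(): bandwidth as a
     * 2^20 fixed-point fraction runtime/period. */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << BW_SHIFT) / period;
    }

    int main(void)
    {
            /* 10 ms runtime every 100 ms period: 0.1 * 2^20 = 104857 */
            printf("%llu\n", (unsigned long long)
                   to_ratio(100 * 1000000ULL, 10 * 1000000ULL));
            return 0;
    }
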
3138 struct dl_bw *cur_dl_b;
3159 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3162 struct dl_bw *dl_b;
3170 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3174 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3183 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3198 int dl_bw_alloc(int cpu, u64 dl_bw)
3200 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3203 void dl_bw_free(int cpu, u64 dl_bw)
3205 dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
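
dl_bw_alloc()/dl_bw_free() at the end are thin wrappers feeding dl_bw_req_alloc/dl_bw_req_free into dl_bw_manage(), which returns -EBUSY when __dl_overflow() rejects the request. A hedged usage sketch; the caller and place_task_on() are hypothetical (in current kernels the real callers live in the cpuset code):

    /* Hypothetical caller: reserve bandwidth on a CPU's root domain
     * before committing a deadline task there, and return it if the
     * rest of the operation fails. */
    static int reserve_and_place(int cpu, u64 tsk_bw)
    {
            int ret = dl_bw_alloc(cpu, tsk_bw);     /* -EBUSY if it won't fit */

            if (ret)
                    return ret;

            ret = place_task_on(cpu);               /* hypothetical helper */
            if (ret)
                    dl_bw_free(cpu, tsk_bw);        /* roll the reservation back */

            return ret;
    }
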