Lines matching refs:this_rq in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/


610 #define this_rq()		(&__get_cpu_var(runqueues))
1005 rq = this_rq();
1091 if (rq == this_rq()) {
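
Note: line 610 defines the this_rq() macro, which resolves to the current CPU's entry in the per-CPU runqueues array via __get_cpu_var(); lines 1005 and 1091 are typical uses. A minimal userspace sketch of the same lookup, with an illustrative array and my_cpu_id() standing in for the kernel's per-CPU machinery:

    #define NR_CPUS 4

    struct rq { unsigned long nr_running; };   /* trimmed-down stand-in */

    static struct rq runqueues[NR_CPUS];       /* kernel: one instance per CPU */

    static int my_cpu_id(void) { return 0; }   /* stand-in for smp_processor_id() */

    /* analogue of: #define this_rq() (&__get_cpu_var(runqueues)) */
    static struct rq *this_rq_sketch(void)
    {
        return &runqueues[my_cpu_id()];
    }
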
1722 static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1723 __releases(this_rq->lock)
1725 __acquires(this_rq->lock)
1727 raw_spin_unlock(&this_rq->lock);
1728 double_rq_lock(this_rq, busiest);
1741 static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1742 __releases(this_rq->lock)
1744 __acquires(this_rq->lock)
1749 if (busiest < this_rq) {
1750 raw_spin_unlock(&this_rq->lock);
1752 raw_spin_lock_nested(&this_rq->lock,
1765 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1767 static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1771 raw_spin_unlock(&this_rq->lock);
1775 return _double_lock_balance(this_rq, busiest);
1778 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1782 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
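
Note: lines 1722-1782 are the double-runqueue locking path used during load balancing. When a CPU already holding this_rq->lock also needs busiest->lock, _double_lock_balance() acquires the two locks in ascending address order, so two CPUs balancing against each other can never deadlock ABBA-style. A hedged sketch of that ordering rule, with pthread mutexes standing in for raw spinlocks (the kernel additionally uses raw_spin_lock_nested() to keep lockdep informed, omitted here):

    #include <pthread.h>

    struct rq { pthread_mutex_t lock; };

    /* Caller holds this_rq->lock on entry; returns with both locks held.
     * If busiest sorts first by address, this_rq->lock is dropped and the
     * pair is reacquired in ascending order, as on lines 1749-1752. */
    static void double_lock_sketch(struct rq *this_rq, struct rq *busiest)
    {
        if (busiest < this_rq) {
            pthread_mutex_unlock(&this_rq->lock);
            pthread_mutex_lock(&busiest->lock);
            pthread_mutex_lock(&this_rq->lock);
        } else {
            pthread_mutex_lock(&busiest->lock);
        }
    }
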
1838 static void calc_load_account_idle(struct rq *this_rq);
1841 static void update_cpu_load(struct rq *this_rq);
2019 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2392 this_rq()->nr_uninterruptible--;
2452 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2453 * the current task. this_rq() stays locked over invocation.
2460 BUG_ON(rq != this_rq());
2826 struct rq *rq = this_rq();
2887 * this_rq must be evaluated again because prev may have moved
2891 finish_task_switch(this_rq(), prev);
2957 struct rq *this = this_rq();
2968 static long calc_load_fold_active(struct rq *this_rq)
2972 nr_active = this_rq->nr_running;
2973 nr_active += (long) this_rq->nr_uninterruptible;
2975 if (nr_active != this_rq->calc_load_active) {
2976 delta = nr_active - this_rq->calc_load_active;
2977 this_rq->calc_load_active = nr_active;
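
Note: calc_load_fold_active() (lines 2968-2977) turns each runqueue's absolute active count (nr_running plus nr_uninterruptible) into a delta against the value recorded at the previous fold, so the global loadavg counter needs only one add per runqueue per window. A standalone sketch of the fold, with field names mirroring the listing:

    struct rq_sketch {
        unsigned long nr_running;
        unsigned long nr_uninterruptible;
        long calc_load_active;          /* count reported at the last fold */
    };

    static long calc_load_fold_sketch(struct rq_sketch *rq)
    {
        long nr_active = rq->nr_running + (long)rq->nr_uninterruptible;
        long delta = 0;

        if (nr_active != rq->calc_load_active) {
            delta = nr_active - rq->calc_load_active;
            rq->calc_load_active = nr_active;
        }
        return delta;                   /* folded into the global task count */
    }
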
3000 static void calc_load_account_idle(struct rq *this_rq)
3004 delta = calc_load_fold_active(this_rq);
3144 static void calc_load_account_idle(struct rq *this_rq)
3200 static void calc_load_account_active(struct rq *this_rq)
3204 if (time_before(jiffies, this_rq->calc_load_update))
3207 delta = calc_load_fold_active(this_rq);
3212 this_rq->calc_load_update += LOAD_FREQ;
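
Note: calc_load_account_active() (lines 3200-3212) rate-limits the fold above to one run per LOAD_FREQ window using time_before(), then pushes the per-rq deadline forward. A sketch of that gate, with the wraparound-safe signed comparison that the kernel's time_before() macro performs written out explicitly:

    /* wraparound-safe "a is before b", as in the kernel's time_before() */
    static int before_sketch(unsigned long a, unsigned long b)
    {
        return (long)(a - b) < 0;
    }

    static void account_active_sketch(unsigned long now_jiffies,
                                      unsigned long *calc_load_update,
                                      unsigned long load_freq)
    {
        if (before_sketch(now_jiffies, *calc_load_update))
            return;                     /* window not expired yet */
        /* ... fold this runqueue's delta into the global count ... */
        *calc_load_update += load_freq; /* arm the next window */
    }
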
3287 static void update_cpu_load(struct rq *this_rq)
3289 unsigned long this_load = this_rq->load.weight;
3294 this_rq->nr_load_updates++;
3297 if (curr_jiffies == this_rq->last_load_update_tick)
3300 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3301 this_rq->last_load_update_tick = curr_jiffies;
3304 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3310 old_load = this_rq->cpu_load[i];
3321 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3324 sched_avg_update(this_rq);
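
Note: update_cpu_load() (lines 3287-3324) maintains cpu_load[] as a family of decaying averages: slot i is updated as (old_load * (2^i - 1) + this_load) / 2^i, so higher indexes track load with more inertia, while slot 0 holds the instantaneous weight. A sketch of the decay step (the full kernel source also rounds new_load up when load is rising and handles tickless-idle gaps, both omitted here):

    #define CPU_LOAD_IDX_MAX 5

    static void update_cpu_load_sketch(unsigned long cpu_load[CPU_LOAD_IDX_MAX],
                                       unsigned long this_load)
    {
        int i;

        cpu_load[0] = this_load;        /* fasttrack for idx 0, as on line 3304 */
        for (i = 1; i < CPU_LOAD_IDX_MAX; i++) {
            unsigned long scale = 1UL << i;
            unsigned long old_load = cpu_load[i];

            /* matches line 3321: (old * (scale - 1) + new) >> i */
            cpu_load[i] = (old_load * (scale - 1) + this_load) >> i;
        }
    }
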
3327 static void update_cpu_load_active(struct rq *this_rq)
3329 update_cpu_load(this_rq);
3331 calc_load_account_active(this_rq);
3565 struct rq *rq = this_rq();
3583 struct rq *rq = this_rq();
3828 schedstat_inc(this_rq(), sched_count);
3831 schedstat_inc(this_rq(), bkl_count);