Lines Matching refs:cpu

12 #include <linux/cpu.h>
42 struct tick_sched *tick_get_tick_sched(int cpu)
44 return &per_cpu(tick_cpu_sched, cpu);
208 int tick_cpu, cpu = smp_processor_id();
226 WRITE_ONCE(tick_do_timer_cpu, cpu);
227 tick_cpu = cpu;
231 if (tick_cpu == cpu)
366 static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
370 if (unlikely(!cpu_online(cpu)))
414 void tick_nohz_full_kick_cpu(int cpu)
416 if (!tick_nohz_full_cpu(cpu))
419 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
424 int cpu;
446 * set_task_cpu(p, cpu);
447 * STORE p->cpu = @cpu
452 * LOAD p->tick_dep_mask LOAD p->cpu
454 cpu = task_cpu(tsk);
457 if (cpu_online(cpu))
458 tick_nohz_full_kick_cpu(cpu);
468 int cpu;
474 for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
475 tick_nohz_full_kick_cpu(cpu);
507 void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
512 ts = per_cpu_ptr(&tick_cpu_sched, cpu);
518 if (cpu == smp_processor_id()) {
523 tick_nohz_full_kick_cpu(cpu);
530 void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
532 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
609 bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
616 if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
621 static int tick_nohz_cpu_down(unsigned int cpu)
623 return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
628 int cpu, ret;
647 cpu = smp_processor_id();
649 if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
651 "for timekeeping\n", cpu);
652 cpumask_clear_cpu(cpu, tick_nohz_full_mask);
656 for_each_cpu(cpu, tick_nohz_full_mask)
657 ct_cpu_track_user(cpu);
694 bool tick_nohz_tick_stopped_cpu(int cpu)
696 struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
788 * @cpu: CPU number to query
801 * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu
803 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
805 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
808 !nr_iowait_cpu(cpu), last_update_time);
814 * @cpu: CPU number to query
827 * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
829 u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
831 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
834 nr_iowait_cpu(cpu), last_update_time);
886 * @cpu: CPU number
893 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
955 if (tick_cpu != cpu &&
971 static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
1016 if (tick_cpu == cpu) {
1077 static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu)
1079 if (tick_nohz_next_event(ts, cpu))
1080 tick_nohz_stop_tick(ts, cpu);
1109 int cpu = smp_processor_id();
1111 if (can_stop_full_tick(cpu, ts))
1112 tick_nohz_full_stop_tick(ts, cpu);
1168 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
1170 WARN_ON_ONCE(cpu_is_offline(cpu));
1188 if (tick_cpu == cpu)
1207 int cpu = smp_processor_id();
1216 else if (can_stop_idle_tick(cpu, ts))
1217 expires = tick_nohz_next_event(ts, cpu);
1226 tick_nohz_stop_tick(ts, cpu);
1233 nohz_balance_enter_idle(cpu);
1343 int cpu = smp_processor_id();
1355 if (!can_stop_idle_tick(cpu, ts))
1358 next_event = tick_nohz_next_event(ts, cpu);
1375 * @cpu: target CPU number
1379 * Return: the current idle calls counter value for @cpu
1381 unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1383 struct tick_sched *ts = tick_get_tick_sched(cpu);
1619 void tick_sched_timer_dying(int cpu)
1621 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
1622 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1653 int cpu;
1655 for_each_possible_cpu(cpu)
1656 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
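The entries above appear to come from kernel/time/tick-sched.c; the sketches below are illustrative usage examples keyed to the listed line numbers, not lines from that file.

The tick_nohz_dep_set_cpu()/tick_nohz_dep_clear_cpu() references (lines 507 and 530) are the per-CPU half of the tick dependency machinery. A minimal sketch of how a subsystem keeps the tick alive on one CPU, assuming the tick_dep_set_cpu()/tick_dep_clear_cpu() wrappers from <linux/tick.h> and borrowing TICK_DEP_BIT_PERF_EVENTS purely as an illustrative dependency bit (real callers own a specific bit):

#include <linux/tick.h>

/*
 * Illustrative only: keep the tick running on @cpu while some per-CPU
 * work depends on it, then release the dependency.  The <linux/tick.h>
 * wrappers call tick_nohz_dep_set_cpu()/tick_nohz_dep_clear_cpu()
 * (lines 507/530 above) only when @cpu is a nohz_full CPU.
 */
static void demo_pin_tick_on(int cpu)
{
        /* Sets the bit in the CPU's tick_dep_mask and kicks it if needed. */
        tick_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}

static void demo_unpin_tick_on(int cpu)
{
        /* Clears the bit; the CPU may stop its tick again afterwards. */
        tick_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
}

Setting the dependency for a remote CPU ends up in tick_nohz_full_kick_cpu() (line 414), which queues the per-CPU nohz_full_kick_work irq_work (line 419) so the target re-evaluates whether it may keep its tick stopped.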
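get_cpu_idle_time_us() and get_cpu_iowait_time_us() (lines 803 and 829) are the exported accessors behind /proc/stat and the cpufreq governors; per the kerneldoc fragments above they return -1 when NOHZ is not enabled. A minimal sketch of reading them from a module, assuming CONFIG_NO_HZ_COMMON is set:

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/tick.h>

static int __init idle_time_demo_init(void)
{
        u64 now_us = 0;
        int cpu;

        for_each_online_cpu(cpu) {
                /* Accumulated idle time in us; now_us gets the update timestamp. */
                u64 idle_us = get_cpu_idle_time_us(cpu, &now_us);
                /* NULL is fine when the caller does not need the timestamp. */
                u64 iowait_us = get_cpu_iowait_time_us(cpu, NULL);

                pr_info("cpu%d: idle=%llu us, iowait=%llu us (as of %llu)\n",
                        cpu, idle_us, iowait_us, now_us);
        }
        return 0;
}

static void __exit idle_time_demo_exit(void)
{
}

module_init(idle_time_demo_init);
module_exit(idle_time_demo_exit);
MODULE_LICENSE("GPL");

The calls at lines 808 and 834 pass !nr_iowait_cpu(cpu) and nr_iowait_cpu(cpu) respectively, so a sleep period that is still in progress is credited to exactly one of the two counters.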
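tick_nohz_tick_stopped_cpu() (line 694) and tick_nohz_get_idle_calls_cpu() (line 1381) are remote-CPU queries of tick_cpu_sched; the schedutil cpufreq governor uses them to tell whether a CPU has gone quiet since it was last sampled. A small sketch, again assuming CONFIG_NO_HZ_COMMON and the <linux/tick.h> declarations:

#include <linux/tick.h>

/*
 * Illustrative helper: return whether @cpu has stopped its periodic tick,
 * and report how many times it has entered idle so far.  Comparing the
 * idle-calls counter against a previously saved value tells the caller
 * whether the CPU went idle again in between.
 */
static bool demo_cpu_is_quiet(int cpu, unsigned long *idle_calls)
{
        /* Snapshot of the per-CPU idle calls counter (line 1381 above). */
        *idle_calls = tick_nohz_get_idle_calls_cpu(cpu);

        /* True when the remote CPU's tick is currently stopped (line 694). */
        return tick_nohz_tick_stopped_cpu(cpu);
}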