Lines Matching defs:tmc (kernel/time/timer_migration.c)

428 static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
430 return !(tmc->tmgroup && tmc->online);
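
The two matches at 428/430 are the complete body of the availability helper. Reconstructed here for context (only the braces and the comment are added), it is the guard that the entry points further down (697, 1060, 1164, 1223, 1373, 1416) use to bail out while the CPU is not yet wired into the hierarchy:

    static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
    {
        /* Usable only once the CPU is linked to a group and marked online. */
        return !(tmc->tmgroup && tmc->online);
    }
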
481 struct tmigr_cpu *tmc)
483 struct tmigr_group *child = NULL, *group = tmc->tmgroup;
496 static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
498 lockdep_assert_held(&tmc->lock);
500 __walk_groups(up, data, tmc);
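
The matches at 481-500 outline the group-walk helpers: walk_groups() asserts that tmc->lock is held, and __walk_groups() climbs from tmc->tmgroup towards the root, invoking an up_f handler at every level. Below is a minimal, self-contained sketch of that climb under simplified assumptions; node, visit_fn, walk_up and print_level are hypothetical stand-ins for tmigr_group, up_f and the real walkers:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        struct node *parent;
        const char *name;
    };

    /* Handler invoked per level; returning true stops the walk early,
     * mirroring how an up_f handler ends the upward propagation. */
    typedef bool (*visit_fn)(struct node *group, struct node *child, void *data);

    static void walk_up(visit_fn up, void *data, struct node *start)
    {
        struct node *child = NULL, *group = start;

        do {
            if (up(group, child, data))
                break;
            child = group;
            group = group->parent;
        } while (group);
    }

    static bool print_level(struct node *group, struct node *child, void *data)
    {
        (void)data;
        printf("visiting %s (child: %s)\n", group->name,
               child ? child->name : "none");
        return false;    /* keep climbing to the top */
    }

    int main(void)
    {
        struct node top  = { .parent = NULL, .name = "top" };
        struct node mid  = { .parent = &top, .name = "mid" };
        struct node leaf = { .parent = &mid, .name = "leaf" };

        walk_up(print_level, NULL, &leaf);
        return 0;
    }
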
674 static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
678 data.childmask = tmc->childmask;
680 trace_tmigr_cpu_active(tmc);
682 tmc->cpuevt.ignore = true;
683 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
685 walk_groups(&tmigr_active_up, &data, tmc);
695 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
697 if (tmigr_is_not_available(tmc))
700 if (WARN_ON_ONCE(!tmc->idle))
703 raw_spin_lock(&tmc->lock);
704 tmc->idle = false;
705 __tmigr_cpu_activate(tmc);
706 raw_spin_unlock(&tmc->lock);
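
Lines 695-706, together with __tmigr_cpu_activate() at 674-685, give the activation sequence: the formerly idle CPU clears tmc->idle under tmc->lock, marks its queued event as ignore (an active CPU expires its own timers), resets the cached wakeup to KTIME_MAX and walks tmigr_active_up to propagate the active state. A reconstruction of the entry point from the matched lines follows; the function header (tmigr_cpu_activate() in mainline) and the bare returns on the two early exits are filled in here and should be read as assumptions:

    void tmigr_cpu_activate(void)
    {
        struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);

        if (tmigr_is_not_available(tmc))
            return;

        if (WARN_ON_ONCE(!tmc->idle))
            return;

        raw_spin_lock(&tmc->lock);
        tmc->idle = false;
        __tmigr_cpu_activate(tmc);
        raw_spin_unlock(&tmc->lock);
    }
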
875 static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
879 .evt = &tmc->cpuevt };
881 lockdep_assert_held(&tmc->lock);
883 if (tmc->remote)
886 trace_tmigr_cpu_new_timer(tmc);
888 tmc->cpuevt.ignore = false;
891 walk_groups(&tmigr_new_timer_up, &data, tmc);
902 struct tmigr_cpu *tmc;
904 tmc = per_cpu_ptr(&tmigr_cpu, cpu);
906 raw_spin_lock_irq(&tmc->lock);
923 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
924 now < tmc->cpuevt.nextevt.expires) {
925 raw_spin_unlock_irq(&tmc->lock);
929 trace_tmigr_handle_remote_cpu(tmc);
931 tmc->remote = true;
932 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
935 raw_spin_unlock_irq(&tmc->lock);
943 * the top). During fetching the next timer interrupt, also tmc->lock
957 raw_spin_lock(&tmc->lock);
970 if (!tmc->online || !tmc->idle) {
981 data.evt = &tmc->cpuevt;
989 walk_groups(&tmigr_new_timer_up, &data, tmc);
992 tmc->remote = false;
993 raw_spin_unlock_irq(&tmc->lock);
1057 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1060 if (tmigr_is_not_available(tmc))
1063 data.childmask = tmc->childmask;
1071 if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
1077 if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
1084 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
1085 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
1086 * handling, tmc->wakeup is fine to be stale as it is called in
1091 __walk_groups(&tmigr_handle_remote_up, &data, tmc);
1093 raw_spin_lock_irq(&tmc->lock);
1094 WRITE_ONCE(tmc->wakeup, data.firstexp);
1095 raw_spin_unlock_irq(&tmc->lock);
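
Lines 1077 and 1084-1095 show the discipline around tmc->wakeup in the remote-expiry path: it is read locklessly as an early bail-out (KTIME_MAX meaning this CPU was not asked to come back for remote handling), it is deliberately not reset before the walk, and it is rewritten only at the very end, under tmc->lock, with the first expiry the walk produced, so a stale value merely costs one superfluous pass. Below is a simplified userspace model of that caching discipline; cached_wakeup, handle_remote_work and handle_remote are hypothetical names, and READ_ONCE/WRITE_ONCE are approximated with relaxed C11 atomics:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>

    #define TIME_MAX UINT64_MAX    /* stand-in for KTIME_MAX */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic uint64_t cached_wakeup = TIME_MAX;

    /* Expire deferred work and report the next pending deadline; in the
     * real code this is the __walk_groups(&tmigr_handle_remote_up, ...)
     * walk over the hierarchy. */
    static uint64_t handle_remote_work(void)
    {
        return TIME_MAX;    /* nothing left in this toy version */
    }

    static void handle_remote(void)
    {
        uint64_t firstexp;

        /* Lockless fast path: TIME_MAX means nobody asked us to return. */
        if (atomic_load_explicit(&cached_wakeup, memory_order_relaxed) == TIME_MAX)
            return;

        /* The cache is deliberately not reset here; a stale value only
         * causes a harmless extra pass through this function. */
        firstexp = handle_remote_work();

        pthread_mutex_lock(&lock);
        atomic_store_explicit(&cached_wakeup, firstexp, memory_order_relaxed);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        /* Pretend the idle path asked this CPU to come back at t = 100. */
        atomic_store_explicit(&cached_wakeup, 100, memory_order_relaxed);
        handle_remote();
        return 0;
    }
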
1159 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1164 if (tmigr_is_not_available(tmc))
1168 data.childmask = tmc->childmask;
1170 data.tmc_active = !tmc->idle;
1177 * Check is done lockless as interrupts are disabled and @tmc->idle is
1180 if (!tmc->idle) {
1181 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
1187 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
1193 if (data.now >= READ_ONCE(tmc->wakeup))
1196 raw_spin_lock(&tmc->lock);
1197 if (data.now >= tmc->wakeup)
1199 raw_spin_unlock(&tmc->lock);
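
Lines 1193-1199 are the decision whether the timer softirq has remote work to do: a lockless compare of data.now against the cached tmc->wakeup first, then a second compare under tmc->lock before committing, because the idle path may have moved the wakeup in between. A standalone sketch of that check-then-recheck pattern follows; wakeup and deadline_expired() are hypothetical names, and the mutex stands in for the raw spinlock:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic uint64_t wakeup = 500;

    /* Lockless peek first; only take the lock when the deadline really
     * looks expired, then re-check under the lock because another path
     * may have pushed the deadline out in the meantime. */
    static bool deadline_expired(uint64_t now)
    {
        bool expired = false;

        if (now < atomic_load_explicit(&wakeup, memory_order_relaxed))
            return false;

        pthread_mutex_lock(&lock);
        if (now >= atomic_load_explicit(&wakeup, memory_order_relaxed))
            expired = true;
        pthread_mutex_unlock(&lock);

        return expired;
    }

    int main(void)
    {
        return deadline_expired(1000) ? 0 : 1;
    }
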
1206 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1211 * and thereby the timer idle path is executed once more. @tmc->wakeup
1220 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1223 if (tmigr_is_not_available(tmc))
1226 raw_spin_lock(&tmc->lock);
1228 ret = READ_ONCE(tmc->wakeup);
1230 if (nextexp != tmc->cpuevt.nextevt.expires ||
1231 tmc->cpuevt.ignore) {
1232 ret = tmigr_new_timer(tmc, nextexp);
1239 WRITE_ONCE(tmc->wakeup, ret);
1241 trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
1242 raw_spin_unlock(&tmc->lock);
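
Lines 1226-1239 capture the short-circuit in the idle enqueue path: the previously cached wakeup is returned unchanged unless the CPU's first global expiry actually differs from what is already queued, or the event had been flagged ignore; only then is the hierarchy walked again via tmigr_new_timer() and the result cached with WRITE_ONCE(). A small sketch of that "skip the walk when nothing changed" idea under simplified assumptions; cpu_event, cached_deadline, push_deadline and new_timer are hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cpu_event {
        uint64_t expires;    /* first global expiry queued for this CPU */
        bool ignore;         /* event currently not valid / not queued */
    };

    static uint64_t cached_deadline = UINT64_MAX;

    /* Expensive path: would walk the hierarchy in the real code. */
    static uint64_t push_deadline(struct cpu_event *evt, uint64_t nextexp)
    {
        evt->expires = nextexp;
        evt->ignore = false;
        return nextexp;    /* new first expiry seen on the walk */
    }

    static uint64_t new_timer(struct cpu_event *evt, uint64_t nextexp)
    {
        uint64_t ret = cached_deadline;

        /* Only redo the walk when the expiry changed or the event was
         * previously ignored; otherwise the cached value is still valid. */
        if (nextexp != evt->expires || evt->ignore)
            ret = push_deadline(evt, nextexp);

        cached_deadline = ret;
        return ret;
    }

    int main(void)
    {
        struct cpu_event evt = { .expires = UINT64_MAX, .ignore = true };

        printf("%llu\n", (unsigned long long)new_timer(&evt, 1000));
        printf("%llu\n", (unsigned long long)new_timer(&evt, 1000)); /* cached */
        return 0;
    }
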
1339 static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
1343 .evt = &tmc->cpuevt,
1344 .childmask = tmc->childmask };
1352 tmc->cpuevt.ignore = false;
1354 walk_groups(&tmigr_inactive_up, &data, tmc);
1370 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1373 if (tmigr_is_not_available(tmc))
1376 raw_spin_lock(&tmc->lock);
1378 ret = __tmigr_cpu_deactivate(tmc, nextexp);
1380 tmc->idle = true;
1386 WRITE_ONCE(tmc->wakeup, ret);
1388 trace_tmigr_cpu_idle(tmc, nextexp);
1389 raw_spin_unlock(&tmc->lock);
1413 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1414 struct tmigr_group *group = tmc->tmgroup;
1416 if (tmigr_is_not_available(tmc))
1419 if (WARN_ON_ONCE(tmc->idle))
1422 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
1611 * Update tmc -> group / child -> group connection
1614 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1618 tmc->tmgroup = group;
1619 tmc->childmask = BIT(group->num_children++);
1623 trace_tmigr_connect_cpu_parent(tmc);
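
Lines 1618/1619 show how a CPU is wired to its leaf group: it stores the group pointer and claims the next free bit of the group as its childmask, so the group can track per-child state in a single mask word. A tiny sketch of that numbering scheme follows; group, child and connect() are hypothetical simplifications:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct group {
        unsigned int num_children;
        unsigned int active_mask;    /* one bit per connected child */
    };

    struct child {
        struct group *parent;
        unsigned int childmask;
    };

    static void connect(struct child *c, struct group *g)
    {
        c->parent = g;
        /* Each new child claims the next free bit position. */
        c->childmask = BIT(g->num_children++);
    }

    int main(void)
    {
        struct group g = { 0 };
        struct child a, b;

        connect(&a, &g);
        connect(&b, &g);
        printf("a=0x%x b=0x%x\n", a.childmask, b.childmask);    /* 0x1 0x2 */
        return 0;
    }
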
1669 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1673 if (!tmc->tmgroup) {
1674 raw_spin_lock_init(&tmc->lock);
1680 if (tmc->childmask == 0)
1683 timerqueue_init(&tmc->cpuevt.nextevt);
1684 tmc->cpuevt.nextevt.expires = KTIME_MAX;
1685 tmc->cpuevt.ignore = true;
1686 tmc->cpuevt.cpu = cpu;
1688 tmc->remote = false;
1689 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1691 raw_spin_lock_irq(&tmc->lock);
1692 trace_tmigr_cpu_online(tmc);
1693 tmc->idle = timer_base_is_idle();
1694 if (!tmc->idle)
1695 __tmigr_cpu_activate(tmc);
1696 tmc->online = true;
1697 raw_spin_unlock_irq(&tmc->lock);
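
Lines 1683-1697 show the ordering the online path relies on: the per-CPU state is first put into a harmless default (empty timerqueue, expiry and wakeup at KTIME_MAX, event ignored, remote cleared), and only then, under tmc->lock with interrupts disabled, is the CPU activated when the timer base is not idle and tmc->online set to true, the flag tmigr_is_not_available() keys off. A minimal initialize-then-publish sketch under simplified assumptions; cpu_state, activate() and bring_online() are hypothetical names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct cpu_state {
        pthread_mutex_t lock;
        uint64_t next_expiry;    /* models cpuevt.nextevt.expires */
        uint64_t wakeup;
        bool event_ignored;
        bool idle;
        bool online;             /* readers treat the CPU as absent until set */
    };

    /* Stand-in for __tmigr_cpu_activate(): the CPU handles its own timers
     * again, so its queued event is ignored and the cached wakeup reset. */
    static void activate(struct cpu_state *s)
    {
        s->event_ignored = true;
        s->wakeup = UINT64_MAX;
    }

    static void bring_online(struct cpu_state *s, bool timer_base_idle)
    {
        /* Reset to a harmless state before anyone can observe the CPU. */
        s->next_expiry = UINT64_MAX;
        s->wakeup = UINT64_MAX;
        s->event_ignored = true;

        pthread_mutex_lock(&s->lock);
        s->idle = timer_base_idle;
        if (!s->idle)
            activate(s);
        s->online = true;    /* publish last, under the lock */
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct cpu_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        bring_online(&s, false);
        return s.online ? 0 : 1;
    }
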
1710 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1712 WARN_ON_ONCE(!tmc->online || tmc->idle);
1719 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1723 raw_spin_lock_irq(&tmc->lock);
1724 tmc->online = false;
1725 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1731 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
1732 trace_tmigr_cpu_offline(tmc);
1733 raw_spin_unlock_irq(&tmc->lock);