Lines matching refs: bc (struct clock_event_device *bc, the tick broadcast device, in kernel/time/tick-broadcast.c)

38 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic);
40 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
46 tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic) { BUG(); }
48 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
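Lines 38-48 show the usual conditional-compilation split for these two helpers: real prototypes (with implementations further down in the file) when oneshot support is built in, and a BUG() stub plus an empty inline otherwise, so callers never need #ifdefs of their own. A minimal standalone sketch of that pattern follows; the struct, the BUG() macro and the build handling are stand-ins for illustration, not the kernel definitions.

/* Minimal standalone sketch of the #ifdef stub pattern from lines 38-48.
 * Build without -DCONFIG_TICK_ONESHOT to exercise the stub side. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct clock_event_device { int dummy; };   /* stand-in, not the kernel struct */

#define BUG() do { fprintf(stderr, "BUG\n"); abort(); } while (0)

#ifdef CONFIG_TICK_ONESHOT
/* Real versions would be defined later in the same file. */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
#else
/* Oneshot support compiled out: setup must never be reached, resume is a no-op. */
static inline void
tick_broadcast_setup_oneshot(struct clock_event_device *bc, bool from_periodic) { BUG(); }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
#endif

int main(void)
{
    struct clock_event_device bc = { 0 };

    tick_resume_broadcast_oneshot(&bc);     /* harmless no-op on the stub side */
    return 0;
}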
77 static void tick_broadcast_start_periodic(struct clock_event_device *bc)
79 if (bc)
80 tick_setup_periodic(bc, 1);
249 struct clock_event_device *bc = tick_broadcast_device.evtdev;
266 tick_broadcast_start_periodic(bc);
268 tick_broadcast_setup_oneshot(bc, false);
307 if (cpumask_empty(tick_broadcast_mask) && bc)
308 clockevents_shutdown(bc);
317 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
356 struct clock_event_device *bc = tick_broadcast_device.evtdev;
371 local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
440 struct clock_event_device *bc, *dev;
460 bc = tick_broadcast_device.evtdev;
478 if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
496 if (bc) {
499 clockevents_shutdown(bc);
502 tick_broadcast_start_periodic(bc);
504 tick_broadcast_setup_oneshot(bc, false);
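The block around lines 496-504 shows the re-arm decision taken when broadcasting is switched on or off for a CPU: once no CPU is left in the broadcast mask the device is shut down, otherwise it is restarted in whichever mode the broadcast tick device operates in (periodic or oneshot). A compressed standalone model of that decision, with stub types and helpers that only print what the real clockevents calls would do; not the kernel API, and the surrounding locking and mask bookkeeping are omitted.

/* Standalone model of the re-arm decision visible at lines 496-504. */
#include <stdbool.h>
#include <stdio.h>

enum tickdev_mode { TICKDEV_MODE_PERIODIC, TICKDEV_MODE_ONESHOT };

struct clock_event_device { const char *name; };

static void clockevents_shutdown(struct clock_event_device *bc)
{ printf("%s: shut down\n", bc->name); }
static void start_periodic(struct clock_event_device *bc)
{ printf("%s: periodic broadcast\n", bc->name); }
static void setup_oneshot(struct clock_event_device *bc)
{ printf("%s: oneshot broadcast\n", bc->name); }

static void rearm_broadcast(struct clock_event_device *bc, bool mask_empty,
                            enum tickdev_mode mode)
{
    if (!bc)
        return;
    if (mask_empty)
        clockevents_shutdown(bc);       /* no CPU depends on broadcasting */
    else if (mode == TICKDEV_MODE_PERIODIC)
        start_periodic(bc);
    else
        setup_oneshot(bc);
}

int main(void)
{
    struct clock_event_device bc = { "bc" };

    rearm_broadcast(&bc, false, TICKDEV_MODE_ONESHOT);
    return 0;
}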
526 struct clock_event_device *bc = tick_broadcast_device.evtdev;
529 if (bc && cpumask_empty(tick_broadcast_mask))
530 clockevents_shutdown(bc);
551 struct clock_event_device *bc;
556 bc = tick_broadcast_device.evtdev;
557 if (bc)
558 clockevents_shutdown(bc);
581 struct clock_event_device *bc;
586 bc = tick_broadcast_device.evtdev;
588 if (bc) {
589 clockevents_tick_resume(bc);
594 tick_broadcast_start_periodic(bc);
598 tick_resume_broadcast_oneshot(bc);
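Lines 551-598 are the suspend/resume pair: suspend simply shuts the broadcast device down, and resume (lines 586-598) first resumes the hardware via clockevents_tick_resume() and then re-arms it as periodic or oneshot depending on the broadcast mode. A simplified standalone model of the resume side; types and helpers are stand-ins and only the control flow visible in the fragments is modelled.

/* Standalone model of the resume path seen at lines 586-598. */
#include <stdio.h>

enum tickdev_mode { TICKDEV_MODE_PERIODIC, TICKDEV_MODE_ONESHOT };

struct clock_event_device { const char *name; };

static void clockevents_tick_resume(struct clock_event_device *bc)
{ printf("%s: hardware resumed\n", bc->name); }
static void start_periodic(struct clock_event_device *bc)
{ printf("%s: periodic broadcast restarted\n", bc->name); }
static void resume_oneshot(struct clock_event_device *bc)
{ printf("%s: back in oneshot state\n", bc->name); }

static void resume_broadcast(struct clock_event_device *bc, enum tickdev_mode mode)
{
    if (!bc)
        return;
    clockevents_tick_resume(bc);
    if (mode == TICKDEV_MODE_PERIODIC)
        start_periodic(bc);
    else
        resume_oneshot(bc);
}

int main(void)
{
    struct clock_event_device bc = { "bc" };

    resume_broadcast(&bc, TICKDEV_MODE_ONESHOT);
    return 0;
}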
638 static void tick_broadcast_set_affinity(struct clock_event_device *bc,
641 if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
644 if (cpumask_equal(bc->cpumask, cpumask))
647 bc->cpumask = cpumask;
648 irq_set_affinity(bc->irq, bc->cpumask);
651 static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
654 if (!clockevent_state_oneshot(bc))
655 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
657 clockevents_program_event(bc, expires, 1);
658 tick_broadcast_set_affinity(bc, cpumask_of(cpu));
661 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
663 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
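Lines 651-663 show how a broadcast expiry is armed: the device is switched into oneshot state if it is not there yet (654-655), the expiry is programmed (657), and, via tick_broadcast_set_affinity() (638-648), the broadcast interrupt is steered to the target CPU when the device advertises CLOCK_EVT_FEAT_DYNIRQ and is not already pointing there; tick_resume_broadcast_oneshot() (661-663) is just the state switch on resume. A standalone model of the programming sequence, with stub helpers and made-up feature/irq values, not the kernel API.

/* Standalone model of the set_event/set_affinity sequence at lines 638-658. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLOCK_EVT_FEAT_DYNIRQ   0x01    /* value is a stand-in */

struct clock_event_device {
    unsigned int features;
    int          irq;
    int          affine_cpu;    /* models bc->cpumask */
    bool         oneshot;       /* models clockevent_state_oneshot() */
};

static void switch_to_oneshot(struct clock_event_device *bc)
{ bc->oneshot = true; }
static void program_event(struct clock_event_device *bc, int64_t expires)
{ printf("irq %d programmed for %lld\n", bc->irq, (long long)expires); }
static void irq_set_affinity(int irq, int cpu)
{ printf("irq %d steered to cpu %d\n", irq, cpu); }

static void broadcast_set_event(struct clock_event_device *bc, int cpu, int64_t expires)
{
    if (!bc->oneshot)
        switch_to_oneshot(bc);          /* leave periodic/shutdown state first */

    program_event(bc, expires);

    /* Only devices with a steerable per-CPU interrupt follow the target CPU. */
    if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
        return;
    if (bc->affine_cpu == cpu)
        return;                         /* already pointing at this CPU */
    bc->affine_cpu = cpu;
    irq_set_affinity(bc->irq, cpu);
}

int main(void)
{
    struct clock_event_device bc = { .features = CLOCK_EVT_FEAT_DYNIRQ, .irq = 27 };

    broadcast_set_event(&bc, 2, 123456789);
    return 0;
}

The point of the affinity move is to deliver the wakeup directly to the CPU with the earliest deadline instead of waking an unrelated CPU that then has to relay it.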
770 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
772 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
774 if (bc->next_event == KTIME_MAX)
776 return bc->bound_on == cpu ? -EBUSY : 0;
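broadcast_needs_cpu() (lines 770-776) only matters for the hrtimer-based broadcast pseudo-device: real broadcast hardware keeps running no matter which CPU sleeps, and even the pseudo-device releases its CPU once nothing is queued (next_event == KTIME_MAX); otherwise the CPU it is bound to must stay out of deep idle, hence -EBUSY. A standalone restatement of that check, with stand-in types and constants instead of the kernel headers.

/* Standalone restatement of the broadcast_needs_cpu() logic at lines 770-776. */
#include <stdint.h>

#define CLOCK_EVT_FEAT_HRTIMER  0x02        /* value is a stand-in */
#define KTIME_MAX               INT64_MAX
#define EBUSY                   16          /* stand-in for <errno.h> */

struct clock_event_device {
    unsigned int features;
    int64_t      next_event;    /* next programmed expiry, KTIME_MAX if none */
    int          bound_on;      /* CPU the hrtimer pseudo-device runs on */
};

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
    /* Real broadcast hardware keeps ticking no matter which CPU sleeps. */
    if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
        return 0;
    /* Nothing queued: the pseudo-device does not pin any CPU. */
    if (bc->next_event == KTIME_MAX)
        return 0;
    /* A pending event keeps the bound CPU out of deep idle. */
    return bc->bound_on == cpu ? -EBUSY : 0;
}

int main(void)
{
    struct clock_event_device bc = {
        .features = CLOCK_EVT_FEAT_HRTIMER, .next_event = 1000, .bound_on = 3,
    };

    return broadcast_needs_cpu(&bc, 3) == -EBUSY ? 0 : 1;
}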
779 static void broadcast_shutdown_local(struct clock_event_device *bc,
787 if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
788 if (broadcast_needs_cpu(bc, smp_processor_id()))
790 if (dev->next_event < bc->next_event)
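broadcast_shutdown_local() (lines 779-790) guards whether the CPU-local tick device may really be shut down on entry to broadcast idle: with the hrtimer pseudo-device, the local device has to stay alive if this CPU is the one carrying the broadcast (broadcast_needs_cpu()) or if it would fire before the broadcast device's currently programmed expiry. A standalone model of that guard, with stub types and the shutdown reduced to a printout.

/* Standalone model of the broadcast_shutdown_local() guard at lines 779-790. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLOCK_EVT_FEAT_HRTIMER  0x02        /* value is a stand-in */

struct clock_event_device {
    unsigned int features;
    int64_t      next_event;
    int          bound_on;
};

/* Same decision as the sketch after line 776. */
static bool broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
    return (bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
           bc->next_event != INT64_MAX && bc->bound_on == cpu;
}

static void shutdown_local(struct clock_event_device *bc,
                           struct clock_event_device *dev, int this_cpu)
{
    if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
        /* This CPU carries the broadcast hrtimer: keep the local device. */
        if (broadcast_needs_cpu(bc, this_cpu))
            return;
        /* The local device fires before the broadcast would: keep it. */
        if (dev->next_event < bc->next_event)
            return;
    }
    printf("local tick device shut down, broadcast takes over\n");
}

int main(void)
{
    struct clock_event_device bc  = { .next_event = 2000 };
    struct clock_event_device dev = { .next_event = 5000 };

    shutdown_local(&bc, &dev, 0);
    return 0;
}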
800 struct clock_event_device *bc, *dev = td->evtdev;
805 bc = tick_broadcast_device.evtdev;
815 ret = broadcast_needs_cpu(bc, cpu);
825 if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
834 broadcast_shutdown_local(bc, dev);
848 } else if (dev->next_event < bc->next_event) {
849 tick_broadcast_set_event(bc, cpu, dev->next_event);
857 ret = broadcast_needs_cpu(bc, cpu);
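Lines 800-857 are the idle-entry half of the oneshot control path: the checks around 815/857 return broadcast_needs_cpu()'s -EBUSY to veto deep idle for the CPU bound to the hrtimer pseudo-device, and the key step at lines 848-849 pulls the broadcast expiry forward when this CPU's own next event is earlier than what the broadcast device is already armed for, so the wakeup is not lost. A compressed standalone model of just that reprogramming step; stub types, no locking, no cpumask handling.

/* Standalone model of the "pull the expiry forward" step at lines 848-849. */
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

struct clock_event_device {
    int64_t next_event;     /* next programmed expiry, KTIME_MAX if none */
};

static void broadcast_set_event(struct clock_event_device *bc, int cpu, int64_t expires)
{
    bc->next_event = expires;
    printf("broadcast reprogrammed for cpu %d at %lld\n", cpu, (long long)expires);
}

static void oneshot_control_enter(struct clock_event_device *bc,
                                  struct clock_event_device *dev, int cpu)
{
    /*
     * The CPU is about to stop its local tick.  If it needs a wakeup
     * earlier than whatever the broadcast device is already armed for,
     * pull the broadcast expiry forward; otherwise the existing
     * programming covers this CPU as well.
     */
    if (dev->next_event < bc->next_event)
        broadcast_set_event(bc, cpu, dev->next_event);
}

int main(void)
{
    struct clock_event_device bc  = { .next_event = KTIME_MAX };
    struct clock_event_device dev = { .next_event = 1000 };

    oneshot_control_enter(&bc, &dev, 1);
    return 0;
}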
1024 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc,
1030 if (!bc)
1039 if (bc->event_handler == tick_handle_oneshot_broadcast) {
1063 bc->event_handler = tick_handle_oneshot_broadcast;
1064 bc->next_event = KTIME_MAX;
1097 if (clockevent_state_oneshot(bc))
1118 tick_broadcast_set_event(bc, cpu, nexttick);
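tick_broadcast_setup_oneshot() (lines 1024-1118) converts the broadcast device for oneshot operation: if tick_handle_oneshot_broadcast is not yet installed as its handler it gets installed and next_event is reset to KTIME_MAX (1063-1064), and when the switch comes from periodic mode with CPUs already waiting, the first expiry is programmed right away (1097, 1118). A minimal standalone model of that handover; the path at line 1039 (device already converted) is reduced to a plain return here, and the periodic-to-oneshot cpumask migration is omitted.

/* Standalone model of the handover in tick_broadcast_setup_oneshot()
 * (lines 1039, 1063-1064, 1118); simplified, stub types. */
#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX

struct clock_event_device {
    void    (*event_handler)(struct clock_event_device *);
    int64_t next_event;
};

static void handle_oneshot_broadcast(struct clock_event_device *bc)
{ printf("broadcast fired, waking idle CPUs\n"); }

static void broadcast_set_event(struct clock_event_device *bc, int cpu, int64_t expires)
{
    bc->next_event = expires;
    printf("first broadcast expiry for cpu %d at %lld\n", cpu, (long long)expires);
}

static void setup_oneshot(struct clock_event_device *bc, int cpu, int64_t nexttick)
{
    /* Handler already installed (device converted earlier): nothing to do here. */
    if (bc->event_handler == handle_oneshot_broadcast)
        return;

    bc->event_handler = handle_oneshot_broadcast;
    bc->next_event = KTIME_MAX;

    /* Coming from periodic mode with waiters pending: arm the first expiry. */
    if (nexttick != KTIME_MAX)
        broadcast_set_event(bc, cpu, nexttick);
}

int main(void)
{
    struct clock_event_device bc = { .next_event = KTIME_MAX };

    setup_oneshot(&bc, 0, 1000000);
    return 0;
}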
1126 struct clock_event_device *bc;
1134 bc = tick_broadcast_device.evtdev;
1135 if (bc)
1136 tick_broadcast_setup_oneshot(bc, oldmode == TICKDEV_MODE_PERIODIC);
1144 struct clock_event_device *bc;
1148 bc = tick_broadcast_device.evtdev;
1150 if (bc && broadcast_needs_cpu(bc, deadcpu)) {
1152 clockevents_program_event(bc, bc->next_event, 1);
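Lines 1144-1152 cover CPU hot-unplug: if the CPU going away is the one the hrtimer broadcast pseudo-device is bound to (broadcast_needs_cpu()), the pending expiry is reprogrammed so the broadcast hrtimer moves to a surviving CPU and the queued wakeup is not lost. A standalone model of that pull, with stub types and the reprogramming reduced to a rebind-and-print.

/* Standalone model of the hotplug pull at lines 1150-1152. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct clock_event_device {
    int64_t next_event;     /* pending expiry, INT64_MAX if none */
    int     bound_on;       /* CPU carrying the hrtimer pseudo-device */
    bool    is_hrtimer;     /* models CLOCK_EVT_FEAT_HRTIMER */
};

static bool broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
    return bc->is_hrtimer && bc->next_event != INT64_MAX && bc->bound_on == cpu;
}

static void program_event(struct clock_event_device *bc, int surviving_cpu)
{
    bc->bound_on = surviving_cpu;
    printf("expiry %lld re-armed on cpu %d\n",
           (long long)bc->next_event, surviving_cpu);
}

static void broadcast_tick_pull(struct clock_event_device *bc, int deadcpu, int thiscpu)
{
    if (bc && broadcast_needs_cpu(bc, deadcpu))
        program_event(bc, thiscpu);     /* keep the pending wakeup alive */
}

int main(void)
{
    struct clock_event_device bc = { .next_event = 4242, .bound_on = 2, .is_hrtimer = true };

    broadcast_tick_pull(&bc, 2, 0);
    return 0;
}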
1188 struct clock_event_device *bc = tick_broadcast_device.evtdev;
1190 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
1196 struct clock_event_device *bc = tick_broadcast_device.evtdev;
1198 if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
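The last two fragments are small query helpers: lines 1188-1190 report, NULL-safely, whether the installed broadcast device supports oneshot at all, and lines 1196-1198 are from the fallback path used when oneshot support is compiled out, which refuses deep idle (-EBUSY) unless a real broadcast device exists; the hrtimer pseudo-device cannot help without oneshot mode. A standalone restatement with stand-in types and constants.

/* Standalone restatement of the query helpers at lines 1188-1198. */
#include <stdbool.h>

#define CLOCK_EVT_FEAT_ONESHOT  0x01        /* values are stand-ins */
#define CLOCK_EVT_FEAT_HRTIMER  0x02
#define EBUSY                   16

struct clock_event_device { unsigned int features; };

/* NULL-safe: no broadcast device means no oneshot broadcast. */
static bool broadcast_oneshot_available(struct clock_event_device *bc)
{
    return bc ? (bc->features & CLOCK_EVT_FEAT_ONESHOT) : false;
}

/*
 * Fallback when oneshot support is compiled out: deep idle is only
 * allowed if a real broadcast device can cover the sleeping CPU; the
 * hrtimer pseudo-device cannot, so report -EBUSY.
 */
static int oneshot_control_fallback(struct clock_event_device *bc)
{
    if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
        return -EBUSY;
    return 0;
}

int main(void)
{
    struct clock_event_device bc = { .features = CLOCK_EVT_FEAT_ONESHOT };

    return (broadcast_oneshot_available(&bc) && !oneshot_control_fallback(&bc)) ? 0 : 1;
}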