/*
 * Copyright 2002-2011, Haiku. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


/*!	Kernel timer support: maintains a sorted, per-CPU queue of timer events,
	programs the hardware timer, and implements the public timer API
	(add_timer(), cancel_timer(), spin()).
*/


#include <timer.h>

#include <OS.h>

#include <arch/timer.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <elf.h>
#include <real_time_clock.h>
#include <smp.h>
#include <thread.h>
#include <util/AutoLock.h>


struct per_cpu_timer_data {
	spinlock		lock;
	timer* volatile	events;
	timer* volatile	current_event;
	int32			current_event_in_progress;
	bigtime_t		real_time_offset;
};

static per_cpu_timer_data sPerCPU[SMP_MAX_CPUS];


//#define TRACE_TIMER
#ifdef TRACE_TIMER
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
	\param now The current system time.
*/
static void
set_hardware_timer(bigtime_t scheduleTime, bigtime_t now)
{
	arch_timer_set_hardware_timer(scheduleTime > now ? scheduleTime - now : 0);
}


/*!	Sets the hardware timer to the given absolute time.

	\param scheduleTime The absolute system time for the timer expiration.
*/
static inline void
set_hardware_timer(bigtime_t scheduleTime)
{
	set_hardware_timer(scheduleTime, system_time());
}


/*!	Inserts \a event into \a list, which is kept sorted by ascending
	schedule_time.
	NOTE: expects interrupts to be off and the respective CPU's timer lock to
	be held.
*/
static void
add_event_to_list(timer* event, timer* volatile* list)
{
	timer* next;
	timer* last = NULL;

	// stick it in the event list
	for (next = *list; next; last = next, next = (timer*)next->next) {
		if ((bigtime_t)next->schedule_time >= (bigtime_t)event->schedule_time)
			break;
	}

	if (last != NULL) {
		event->next = last->next;
		last->next = event;
	} else {
		event->next = next;
		*list = event;
	}
}


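/*!	Called on every CPU (via call_all_cpus()) after the real time clock has
	been changed. Adjusts the schedule times of all absolute real-time base
	timers in this CPU's queue and reprograms the hardware timer if the head
	of the queue changed.
*/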
static void
per_cpu_real_time_clock_changed(void*, int cpu)
{
	per_cpu_timer_data& cpuData = sPerCPU[cpu];
	SpinLocker cpuDataLocker(cpuData.lock);

	bigtime_t realTimeOffset = rtc_boot_time();
	if (realTimeOffset == cpuData.real_time_offset)
		return;

	// The real time offset has changed. We need to update all affected
	// timers. First find and dequeue them.
	bigtime_t timeDiff = cpuData.real_time_offset - realTimeOffset;
	cpuData.real_time_offset = realTimeOffset;

	timer* affectedTimers = NULL;
	timer* volatile* it = &cpuData.events;
	timer* firstEvent = *it;
	while (timer* event = *it) {
		// check whether it's an absolute real-time timer
		uint32 flags = event->flags;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER
			|| (flags & B_TIMER_REAL_TIME_BASE) == 0) {
			it = &event->next;
			continue;
		}

		// Yep, remove the timer from the queue and add it to the
		// affectedTimers list.
		*it = event->next;
		event->next = affectedTimers;
		affectedTimers = event;
	}

	// update and requeue the affected timers
	bool firstEventChanged = cpuData.events != firstEvent;
	firstEvent = cpuData.events;

	while (affectedTimers != NULL) {
		timer* event = affectedTimers;
		affectedTimers = event->next;

		bigtime_t oldTime = event->schedule_time;
		event->schedule_time += timeDiff;

		// handle over-/underflows
		if (timeDiff >= 0) {
			if (event->schedule_time < oldTime)
				event->schedule_time = B_INFINITE_TIMEOUT;
		} else {
			if (event->schedule_time < 0)
				event->schedule_time = 0;
		}

		add_event_to_list(event, &cpuData.events);
	}

	firstEventChanged |= cpuData.events != firstEvent;

	// If the first event has changed, reset the hardware timer.
	if (firstEventChanged)
		set_hardware_timer(cpuData.events->schedule_time);
}


// #pragma mark - debugging


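/*!	Kernel debugger command ("timers"): prints every scheduled timer of every
	CPU, including schedule time, period, flags, user data, and the symbol of
	the hook function.
*/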
static int
dump_timers(int argc, char** argv)
{
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++) {
		kprintf("CPU %" B_PRId32 ":\n", i);

		if (sPerCPU[i].events == NULL) {
			kprintf("  no timers scheduled\n");
			continue;
		}

		for (timer* event = sPerCPU[i].events; event != NULL;
				event = event->next) {
			kprintf("  [%9lld] %p: ", (long long)event->schedule_time, event);
			if ((event->flags & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER)
				kprintf("periodic %9lld, ", (long long)event->period);
			else
				kprintf("one shot,           ");

			kprintf("flags: %#x, user data: %p, callback: %p  ",
				event->flags, event->user_data, event->hook);

			// look up and print the hook function symbol
			const char* symbol;
			const char* imageName;
			bool exactMatch;

			status_t error = elf_debug_lookup_symbol_address(
				(addr_t)event->hook, NULL, &symbol, &imageName, &exactMatch);
			if (error == B_OK && exactMatch) {
				if (const char* slash = strrchr(imageName, '/'))
					imageName = slash + 1;

				kprintf("   %s:%s", imageName, symbol);
			}

			kprintf("\n");
		}
	}

	kprintf("current time: %lld\n", (long long)system_time());

	return 0;
}


// #pragma mark - kernel-private


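/*!	Initializes the timer subsystem during boot: sets up the architecture
	specific hardware timer (arch_init_timer()) and registers the "timers"
	debugger command.
*/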
status_t
timer_init(kernel_args* args)
{
	TRACE(("timer_init: entry\n"));

	if (arch_init_timer(args) != B_OK)
		panic("arch_init_timer() failed");

	add_debugger_command_etc("timers", &dump_timers, "List all timers",
		"\n"
		"Prints a list of all scheduled timers.\n", 0);

	return B_OK;
}


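/*!	Called once the real time clock has been initialized; caches the RTC boot
	time offset in each CPU's timer data for later conversion of real-time
	base timers.
*/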
void
timer_init_post_rtc(void)
{
	bigtime_t realTimeOffset = rtc_boot_time();

	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		sPerCPU[i].real_time_offset = realTimeOffset;
}


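/*!	Called whenever the real time clock has been changed; lets every CPU
	update its absolute real-time base timers (see
	per_cpu_real_time_clock_changed()).
*/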
void
timer_real_time_clock_changed()
{
	call_all_cpus(&per_cpu_real_time_clock_changed, NULL);
}


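/*!	Hardware timer interrupt handler: pops all expired events off the current
	CPU's queue, invokes their hooks with the timer lock released, requeues
	periodic timers (unless they were cancelled meanwhile), and finally
	programs the hardware timer for the next pending event. Returns the
	return code of the last hook invoked (B_HANDLED_INTERRUPT if none fired).
*/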
int32
timer_interrupt()
{
	timer* event;
	spinlock* spinlock;
	per_cpu_timer_data& cpuData = sPerCPU[smp_get_current_cpu()];
	int32 rc = B_HANDLED_INTERRUPT;

	TRACE(("timer_interrupt: time %" B_PRIdBIGTIME ", cpu %" B_PRId32 "\n",
		system_time(), smp_get_current_cpu()));

	spinlock = &cpuData.lock;

	acquire_spinlock(spinlock);

	event = cpuData.events;
	while (event != NULL && ((bigtime_t)event->schedule_time < system_time())) {
		// this event needs to happen
		int mode = event->flags;

		cpuData.events = (timer*)event->next;
		cpuData.current_event = event;
		atomic_set(&cpuData.current_event_in_progress, 1);

		release_spinlock(spinlock);

		TRACE(("timer_interrupt: calling hook %p for event %p\n", event->hook,
			event));

		// call the callback
		// note: if the event is not periodic, it is ok
		// to delete the event structure inside the callback
		if (event->hook)
			rc = event->hook(event);

		atomic_set(&cpuData.current_event_in_progress, 0);

		acquire_spinlock(spinlock);

		if ((mode & ~B_TIMER_FLAGS) == B_PERIODIC_TIMER
			&& cpuData.current_event != NULL) {
			// we need to adjust it and add it back to the list
			event->schedule_time += event->period;

			// If the new schedule time is a full interval or more in the past,
			// skip ticks.
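			// (Illustrative example: with a period of 100 and an expiration
			// at 1000, the adjustment above yields 1100; if now is already
			// 1250, the schedule time becomes 1200, the most recent missed
			// tick, instead of replaying every missed interval.)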
			bigtime_t now = system_time();
			if (now >= event->schedule_time + event->period) {
				// pick the closest tick in the past
				event->schedule_time = now
					- (now - event->schedule_time) % event->period;
			}

			add_event_to_list(event, &cpuData.events);
		}

		cpuData.current_event = NULL;

		event = cpuData.events;
	}

	// set up the next hardware timer
	if (cpuData.events != NULL)
		set_hardware_timer(cpuData.events->schedule_time);

	release_spinlock(spinlock);

	return rc;
}


// #pragma mark - public API


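/*!	Schedules \a event on the current CPU to call \a hook after \a period
	microseconds (or at the absolute time \a period for absolute timers).
	\a flags selects the timer mode (e.g. B_ONE_SHOT_ABSOLUTE_TIMER or
	B_PERIODIC_TIMER as used below) plus modifiers such as
	B_TIMER_REAL_TIME_BASE and B_TIMER_USE_TIMER_STRUCT_TIMES. Returns
	B_BAD_VALUE for a NULL \a event or \a hook or a negative \a period,
	B_OK otherwise.

	A minimal usage sketch (illustrative only; \c heartbeat_hook and
	\c sHeartbeat are hypothetical names, not part of this file):

		static timer sHeartbeat;

		static int32
		heartbeat_hook(timer*)
		{
			// runs in interrupt context; keep it short
			return B_HANDLED_INTERRUPT;
		}

		// fire once per second until cancel_timer(&sHeartbeat) is called
		add_timer(&sHeartbeat, &heartbeat_hook, 1000000, B_PERIODIC_TIMER);
*/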
status_t
add_timer(timer* event, timer_hook hook, bigtime_t period, int32 flags)
{
	bigtime_t currentTime = system_time();
	cpu_status state;

	if (event == NULL || hook == NULL || period < 0)
		return B_BAD_VALUE;

	TRACE(("add_timer: event %p\n", event));

	// compute the schedule time
	bigtime_t scheduleTime;
	if ((flags & B_TIMER_USE_TIMER_STRUCT_TIMES) != 0) {
		scheduleTime = event->schedule_time;
		period = event->period;
	} else {
		scheduleTime = period;
		if ((flags & ~B_TIMER_FLAGS) != B_ONE_SHOT_ABSOLUTE_TIMER)
			scheduleTime += currentTime;
		event->schedule_time = (int64)scheduleTime;
		event->period = period;
	}

	event->hook = hook;
	event->flags = flags;

	state = disable_interrupts();
	int currentCPU = smp_get_current_cpu();
	per_cpu_timer_data& cpuData = sPerCPU[currentCPU];
	acquire_spinlock(&cpuData.lock);

	// If the timer is an absolute real-time base timer, convert the schedule
	// time to system time.
	if ((flags & ~B_TIMER_FLAGS) == B_ONE_SHOT_ABSOLUTE_TIMER
		&& (flags & B_TIMER_REAL_TIME_BASE) != 0) {
		if (event->schedule_time > cpuData.real_time_offset)
			event->schedule_time -= cpuData.real_time_offset;
		else
			event->schedule_time = 0;
	}

	add_event_to_list(event, &cpuData.events);
	event->cpu = currentCPU;

	// if the new event is at the head of the list, set the hardware timer
	if (event == cpuData.events)
		set_hardware_timer(scheduleTime, currentTime);

	release_spinlock(&cpuData.lock);
	restore_interrupts(state);

	return B_OK;
}


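/*!	Cancels \a event. Returns \c true if the timer had already fired (or its
	hook is currently executing), \c false if it was removed from the queue
	before firing. May be called from the timer hook itself; in that case, and
	whenever a periodic timer's hook is running, cancelling also prevents the
	timer from being rescheduled.
*/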
bool
cancel_timer(timer* event)
{
	TRACE(("cancel_timer: event %p\n", event));

	InterruptsLocker _;

	// lock the right CPU spinlock
	int cpu = event->cpu;
	SpinLocker spinLocker;
	while (true) {
		if (cpu >= SMP_MAX_CPUS)
			return false;

		spinLocker.SetTo(sPerCPU[cpu].lock, false);
		if (cpu == event->cpu)
			break;

		// cpu field changed while we were trying to lock
		spinLocker.Unlock();
		cpu = event->cpu;
	}

	per_cpu_timer_data& cpuData = sPerCPU[cpu];

	if (event != cpuData.current_event) {
		// The timer hook is not yet being executed.
		timer* current = cpuData.events;
		timer* last = NULL;

		while (current != NULL) {
			if (current == event) {
				// we found it
				if (last == NULL)
					cpuData.events = current->next;
				else
					last->next = current->next;
				current->next = NULL;
				// break out of the whole thing
				break;
			}
			last = current;
			current = current->next;
		}

		// If not found, we assume this was a one-shot timer that has already
		// fired.
		if (current == NULL)
			return true;

		// invalidate CPU field
		event->cpu = 0xffff;

		// If on the current CPU, also reset the hardware timer.
		if (cpu == smp_get_current_cpu()) {
			if (cpuData.events == NULL)
				arch_timer_clear_hardware_timer();
			else
				set_hardware_timer(cpuData.events->schedule_time);
		}

		return false;
	}

	// The timer hook is currently being executed. We clear the current
	// event so that timer_interrupt() will not reschedule periodic timers.
	cpuData.current_event = NULL;

	// We'll have to wait for the hook to complete. When called from the
	// timer hook itself (i.e. on this CPU), we don't wait, of course.
	if (cpu != smp_get_current_cpu()) {
		spinLocker.Unlock();

		while (atomic_get(&cpuData.current_event_in_progress) == 1)
			cpu_wait(&cpuData.current_event_in_progress, 0);
	}

	return true;
}


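/*!	Busy-waits for at least \a microseconds by polling system_time(), pausing
	the CPU between checks. Does not reschedule.
*/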
void
spin(bigtime_t microseconds)
{
	bigtime_t time = system_time();

	while ((system_time() - time) < microseconds)
		cpu_pause();
}
