/*
 * Copyright (c) 1993-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Timer interrupt callout module.
 */

#include <mach/mach_types.h>

#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>

#include <sys/kdebug.h>

#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif


#if DEBUG
#define TIMER_ASSERT	1
#endif

//#define TIMER_ASSERT	1
//#define TIMER_DBG	1

#if TIMER_DBG
#define DBG(x...) kprintf("DBG: " x);
#else
#define DBG(x...)
#endif

lck_grp_t		timer_call_lck_grp;
lck_attr_t		timer_call_lck_attr;
lck_grp_attr_t		timer_call_lck_grp_attr;


#define timer_call_lock_spin(queue)		\
	lck_mtx_lock_spin_always(&queue->lock_data)

#define timer_call_unlock(queue)		\
	lck_mtx_unlock_always(&queue->lock_data)


#define QUEUE(x)	((queue_t)(x))
#define MPQUEUE(x)	((mpqueue_head_t *)(x))
#define TIMER_CALL(x)	((timer_call_t)(x))


uint64_t past_deadline_timers;
uint64_t past_deadline_deltas;
uint64_t past_deadline_longest;
uint64_t past_deadline_shortest = ~0ULL;
enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};

uint64_t past_deadline_timer_adjustment;

static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
boolean_t	mach_timer_coalescing_enabled = TRUE;

mpqueue_head_t	*timer_call_enqueue_deadline_unlocked(
			timer_call_t		call,
			mpqueue_head_t		*queue,
			uint64_t		deadline);

mpqueue_head_t	*timer_call_dequeue_unlocked(
			timer_call_t		call);

void
timer_call_initialize(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
	nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
}


void
timer_call_initialize_queue(mpqueue_head_t *queue)
{
	DBG("timer_call_initialize_queue(%p)\n", queue);
	mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}


void
timer_call_setup(
	timer_call_t			call,
	timer_call_func_t		func,
	timer_call_param_t		param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
	call_entry_setup(CE(call), func, param0);
	simple_lock_init(&(call)->lock, 0);
	call->async_dequeue = FALSE;
}
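
/*
 * Illustrative sketch (not part of the original file) of how a typical
 * kernel client drives this interface. The callback, state pointer and
 * delay names below are hypothetical; timer_call_setup(), timer_call_enter1()
 * and timer_call_cancel() are the real entry points from this module.
 *
 *	static timer_call_data_t	my_timer;
 *
 *	static void
 *	my_timer_expired(timer_call_param_t p0, timer_call_param_t p1)
 *	{
 *		// p0 is param0 from setup, p1 is param1 from enter1
 *	}
 *
 *	timer_call_setup(&my_timer, my_timer_expired, my_state);
 *	timer_call_enter1(&my_timer, NULL,
 *	    mach_absolute_time() + delay_abs, TIMER_CALL_CRITICAL);
 *	...
 *	if (timer_call_cancel(&my_timer))
 *		// the call was still pending and has been removed
 */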

/*
 * Timer call entry locking model
 * ==============================
 *
 * Timer call entries are linked on per-cpu timer queues which are protected
 * by the queue lock and the call entry lock. The locking protocol is:
 *
 *  0) The canonical locking order is timer call entry followed by queue.
 *
 *  1) With only the entry lock held, entry.queue is valid:
 *    1a) NULL: the entry is not queued, or
 *    1b) non-NULL: this queue must be locked before the entry is modified.
 *        After locking the queue, the call.async_dequeue flag must be checked:
 *    1c) TRUE: the entry was removed from the queue by another thread
 *	        and we must NULL the entry.queue and reset this flag, or
 *    1d) FALSE: (i.e. still queued), the entry can be manipulated.
 *
 *  2) If a queue lock is obtained first, the queue is stable:
 *    2a) If a try-lock of a queued entry succeeds, the call can be operated on
 *	  and dequeued.
 *    2b) If a try-lock fails, it indicates that another thread is attempting
 *        to change the entry and move it to a different position in this queue
 *        or to a different queue. The entry can be dequeued but it should not
 *        be operated upon since it is being changed. Furthermore, we don't
 *	  NULL the entry.queue pointer (it is protected by the entry lock we
 *	  don't own). Instead, we set the async_dequeue flag -- see (1c).
 */
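
/*
 * Summary sketch (illustrative only, not compiled): the case (2b) pattern
 * used by the queue-scanning routines below (timer_queue_shutdown(),
 * timer_queue_expire() and timer_queue_migrate()) when the entry try-lock
 * fails while the queue lock is held:
 *
 *	timer_call_lock_spin(queue);
 *	call = TIMER_CALL(queue_first(&queue->head));
 *	if (!simple_lock_try(&call->lock)) {
 *		(void) remque(qe(call));	// unlink, but leave entry.queue
 *		call->async_dequeue = TRUE;	// owner repairs it later, see (1c)
 *	}
 */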

/*
 * Inlines timer_call_entry_dequeue() and timer_call_entry_enqueue_deadline()
 * cast between pointer types (mpqueue_head_t *) and (queue_t) so that
 * we can use the call_entry_dequeue() and call_entry_enqueue_deadline()
 * methods to operate on timer_call structs as if they are call_entry structs.
 * These structures are identical except for their queue head pointer fields.
 *
 * In the debug case, we assert that the timer call locking protocol
 * is being obeyed.
 */
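
/*
 * Illustrative sketch (field order beyond the leading call_entry is an
 * assumption; see kern/timer_call.h and kern/call_entry.h for the real
 * declarations): the CE() cast is safe because a timer_call begins with an
 * embedded call_entry, so CE() simply exposes that common prefix:
 *
 *	struct timer_call {
 *		struct call_entry	call_entry;	// must be first: CE() casts to this
 *		decl_simple_lock_data(,lock);		// protects the entry, see model above
 *		uint64_t		soft_deadline;
 *		uint32_t		flags;
 *		boolean_t		async_dequeue;
 *		uint64_t		ttd;		// time to deadline, for statistics
 *	};
 */
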
#if TIMER_ASSERT
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * XXX The queue lock is actually a mutex in spin mode
	 *     but there's no way to test for it being held
	 *     so we pretend it's a spinlock!
	 */
	if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
		panic("_call_entry_dequeue() "
			"queue %p is not locked\n", old_queue);

	call_entry_dequeue(CE(entry));

	return (old_queue);
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*old_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	/* XXX More lock pretense:  */
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return (old_queue);
}

#else

static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	return MPQUEUE(call_entry_dequeue(CE(entry)));
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t			entry,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	return MPQUEUE(call_entry_enqueue_deadline(CE(entry),
						   QUEUE(queue), deadline));
}

#endif

#if TIMER_ASSERT
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
#endif
/*
 * Assumes call_entry and queues unlocked, interrupts disabled.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t			call,
	mpqueue_head_t			*queue,
	uint64_t			deadline)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
		} else if (old_queue != queue) {
			(void)remque(qe(entry));
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue != queue) {
			timer_call_unlock(old_queue);
			timer_call_lock_spin(queue);
		}
	} else {
		timer_call_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_call_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}

#if TIMER_ASSERT
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
#endif
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t		call)
{
	call_entry_t	entry = CE(call);
	mpqueue_head_t	*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* collision (1c): null queue pointer and reset flag */
			call->async_dequeue = FALSE;
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async1++;
#endif
		} else {
			(void)remque(qe(entry));
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async2++;
#endif
		}
		entry->queue = NULL;
		timer_call_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}

static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	     mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

	/* ctime is needed below for the ttd (time-to-deadline) statistic */
	uint64_t ctime = mach_absolute_time();

#if	defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	call->ttd = call->soft_deadline - ctime;

	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}

boolean_t
timer_call_enter(
	timer_call_t		call,
	uint64_t		deadline,
	uint32_t		flags)
{
	return timer_call_enter_internal(call, NULL, deadline, flags);
}

boolean_t
timer_call_enter1(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	return timer_call_enter_internal(call, param1, deadline, flags);
}

boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head))
			timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
		else
			timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
		timer_call_unlock(old_queue);
	}
	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR6(callout__cancel, timer_call_func_t, CE(call)->func,
	    timer_call_param_t, CE(call)->param0, uint32_t, call->flags, 0,
	    (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (old_queue != NULL);
}

uint32_t	timer_queue_shutdown_lock_skips;
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			timer_call_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_call_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_call_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_call_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_call_unlock(queue);
	splx(s);
}

uint32_t	timer_queue_expire_lock_skips;
uint64_t
timer_queue_expire(
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	timer_call_t	call;

	DBG("timer_queue_expire(%p,)\n", queue);

	timer_call_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= deadline) {
			timer_call_func_t		func;
			timer_call_param_t		param0, param1;

			if (!simple_lock_try(&call->lock)) {
				/* case (2b) lock inversion, dequeue and skip */
				timer_queue_expire_lock_skips++;
				(void) remque(qe(call));
				call->async_dequeue = TRUE;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_call_unlock(queue);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

#if CONFIG_DTRACE
			DTRACE_TMR6(callout__start, timer_call_func_t, func,
			    timer_call_param_t, param0, unsigned, call->flags,
			    0, (call->ttd >> 32),
			    (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

			/* Maintain time-to-deadline in per-processor data
			 * structure for thread wakeup deadline statistics.
			 */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;

#if CONFIG_DTRACE
			DTRACE_TMR3(callout__end, timer_call_func_t, func,
			    timer_call_param_t, param0, timer_call_param_t,
			    param1);
#endif

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

			timer_call_lock_spin(queue);
		}
		else
			break;
	}

	if (!queue_empty(&queue->head))
		deadline = CE(call)->deadline;
	else
		deadline = UINT64_MAX;

	timer_call_unlock(queue);

	return (deadline);
}


extern int serverperfmode;
uint32_t	timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate() is called by etimer_queue_migrate()
 * to move timer requests from the local processor (queue_from)
 * to a target processor's queue (queue_to).
 */
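/*
 * Illustrative sketch (hypothetical queue names; the real caller is
 * etimer_queue_migrate()): the return value is the number of timers moved,
 * or a zero/negative code when nothing is migrated.
 *
 *	int n = timer_queue_migrate(&local_queue, &boot_cpu_queue);
 *	if (n > 0)
 *		...arrange for the target to re-evaluate its next deadline...
 *	else if (n <= 0)
 *		...declined: empty queue, earlier local deadline, a local-only
 *		   timer present, or serverperfmode...
 */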
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t	call;
	timer_call_t	head_to;
	int		timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/*
		 * if we're running a high end server
		 * avoid migrations... they add latency
		 * and don't save us power under typical
		 * server workloads
		 */
		return -4;
	}

	/*
	 * Take both local (from) and target (to) timer queue locks while
	 * moving the timers from the local queue to the target processor.
	 * We assume that the target is always the boot processor.
	 * But only move if all of the following is true:
	 *  - the target queue is non-empty
	 *  - the local queue is non-empty
	 *  - the local queue's first deadline is later than the target's
	 *  - the local queue contains no non-migratable "local" call
	 * so that we need not have the target resync.
	 */

	timer_call_lock_spin(queue_to);

	head_to = TIMER_CALL(queue_first(&queue_to->head));
	if (queue_empty(&queue_to->head)) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_call_lock_spin(queue_from);

	if (queue_empty(&queue_from->head)) {
		timers_migrated = -2;
		goto abort2;
	}

	call = TIMER_CALL(queue_first(&queue_from->head));
	if (CE(call)->deadline < CE(head_to)->deadline) {
		timers_migrated = 0;
		goto abort2;
	}

	/* perform scan for non-migratable timers */
	do {
		if (call->flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
		call = TIMER_CALL(queue_next(qe(call)));
	} while (!queue_end(&queue_from->head, qe(call)));

	/* migration loop itself -- both queues are locked */
	while (!queue_empty(&queue_from->head)) {
		call = TIMER_CALL(queue_first(&queue_from->head));
		if (!simple_lock_try(&call->lock)) {
			/* case (2b) lock order inversion, dequeue only */
			timer_queue_migrate_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, CE(call)->deadline);
		timers_migrated++;
		simple_unlock(&call->lock);
	}

abort2:
	timer_call_unlock(queue_from);
abort1:
	timer_call_unlock(queue_to);

	return timers_migrated;
}