1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * @APPLE_FREE_COPYRIGHT@
33 */
34/*
35 *	File:		etimer.c
36 *	Purpose:	Routines for handling the machine independent
37 *				event timer.
38 */
39
40#include <mach/mach_types.h>
41
42#include <kern/timer_queue.h>
43#include <kern/clock.h>
44#include <kern/thread.h>
45#include <kern/processor.h>
46#include <kern/macro_help.h>
47#include <kern/spl.h>
48#include <kern/etimer.h>
49#include <kern/pms.h>
50
51#include <machine/commpage.h>
52#include <machine/machine_routines.h>
53
54#include <sys/kdebug.h>
55#include <i386/cpu_data.h>
56#include <i386/cpu_topology.h>
57#include <i386/cpu_threads.h>
58
59/*
60 * 	Event timer interrupt.
61 *
62 * XXX a drawback of this implementation is that events serviced earlier must not set deadlines
63 *     that occur before the entire chain completes.
64 *
65 * XXX a better implementation would use a set of generic callouts and iterate over them
66 */
/*
 * etimer_intr() — handle an event-timer ("decrementer") interrupt on the
 * current CPU.
 *
 * user_mode: non-zero if the interrupt arrived while in user mode; controls
 *            whether 'rip' is reported raw or kernel-unslid in the trace.
 * rip:       instruction pointer at the time of interrupt (for tracing).
 *
 * Expires any due timers on this CPU's rtclock queue, services a pending
 * power-management deadline, then reprograms the next hardware pop.
 */
void
etimer_intr(int		user_mode,
	    uint64_t	rip)
{
	uint64_t		abstime;
	rtclock_timer_t		*mytimer;
	cpu_data_t		*pp;
	int32_t			latency;
	uint64_t		pmdeadline;

	pp = current_cpu_datap();

	SCHED_STATS_TIMER_POP(current_processor());

	abstime = mach_absolute_time();		/* Get the time now */

	/* has a pending clock timer expired? */
	mytimer = &pp->rtclock_timer;		/* Point to the event timer */
	if (mytimer->deadline <= abstime) {
		/*
		 * Log interrupt service latency (-ve value expected by tool)
		 * a non-PM event is expected next.
		 * The requested deadline may be earlier than when it was set
		 * - use MAX to avoid reporting bogus latencies.
		 */
		latency = (int32_t) (abstime - MAX(mytimer->deadline,
						   mytimer->when_set));
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_TRAP_LATENCY | DBG_FUNC_NONE,
			-latency,
			((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)),
			user_mode, 0, 0);

		/*
		 * has_expired guards etimer_resync_deadlines() (called from
		 * expiring timers) against re-selecting the stale deadline
		 * while the queue is being drained.
		 */
		mytimer->has_expired = TRUE;	/* Remember that we popped */
		mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
		mytimer->has_expired = FALSE;

		/* Get the time again since we ran a bit */
		abstime = mach_absolute_time();
		mytimer->when_set = abstime;
	}

	/* is it time for power management state change? */
	if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_START,
			0, 0, 0, 0, 0);
		pmCPUDeadline(pp);
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
			DECR_PM_DEADLINE | DBG_FUNC_END,
			0, 0, 0, 0, 0);
	}

	/* schedule our next deadline */
	etimer_resync_deadlines();
}
123
124/*
125 * Set the clock deadline.
126 */
127void etimer_set_deadline(uint64_t deadline)
128{
129	rtclock_timer_t		*mytimer;
130	spl_t			s;
131	cpu_data_t		*pp;
132
133	s = splclock();				/* no interruptions */
134	pp = current_cpu_datap();
135
136	mytimer = &pp->rtclock_timer;		/* Point to the timer itself */
137	mytimer->deadline = deadline;		/* Set new expiration time */
138	mytimer->when_set = mach_absolute_time();
139
140	etimer_resync_deadlines();
141
142	splx(s);
143}
144
145/*
146 * Re-evaluate the outstanding deadlines and select the most proximate.
147 *
148 * Should be called at splclock.
149 */
150void
151etimer_resync_deadlines(void)
152{
153	uint64_t		deadline = EndOfAllTime;
154	uint64_t		pmdeadline;
155	rtclock_timer_t		*mytimer;
156	spl_t			s = splclock();
157	cpu_data_t		*pp;
158	uint32_t		decr;
159
160	pp = current_cpu_datap();
161	if (!pp->cpu_running)
162		/* There's really nothing to do if this procesor is down */
163		return;
164
165	/*
166	 * If we have a clock timer set, pick that.
167	 */
168	mytimer = &pp->rtclock_timer;
169	if (!mytimer->has_expired &&
170	    0 < mytimer->deadline && mytimer->deadline < EndOfAllTime)
171		deadline = mytimer->deadline;
172
173	/*
174	 * If we have a power management deadline, see if that's earlier.
175	 */
176	pmdeadline = pmCPUGetDeadline(pp);
177	if (0 < pmdeadline && pmdeadline < deadline)
178		deadline = pmdeadline;
179
180	/*
181	 * Go and set the "pop" event.
182	 */
183	decr = (uint32_t) setPop(deadline);
184
185	/* Record non-PM deadline for latency tool */
186	if (deadline != pmdeadline) {
187		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
188			DECR_SET_DEADLINE | DBG_FUNC_NONE,
189			decr, 2,
190			deadline, (uint32_t)(deadline >> 32), 0);
191	}
192	splx(s);
193}
194
195void etimer_timer_expire(void	*arg);
196
197void
198etimer_timer_expire(
199__unused void			*arg)
200{
201	rtclock_timer_t		*mytimer;
202	uint64_t			abstime;
203	cpu_data_t			*pp;
204
205	pp = current_cpu_datap();
206
207	mytimer = &pp->rtclock_timer;
208	abstime = mach_absolute_time();
209
210	mytimer->has_expired = TRUE;
211	mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime);
212	mytimer->has_expired = FALSE;
213	mytimer->when_set = mach_absolute_time();
214
215	etimer_resync_deadlines();
216}
217
218uint64_t
219timer_call_slop(
220	uint64_t	deadline)
221{
222	uint64_t now = mach_absolute_time();
223	if (deadline > now) {
224		return MIN((deadline - now) >> 3, NSEC_PER_MSEC); /* Min of 12.5% and 1ms */
225	}
226
227	return 0;
228}
229
230mpqueue_head_t *
231timer_queue_assign(
232    uint64_t        deadline)
233{
234	cpu_data_t			*cdp = current_cpu_datap();
235	mpqueue_head_t		*queue;
236
237	if (cdp->cpu_running) {
238		queue = &cdp->rtclock_timer.queue;
239
240		if (deadline < cdp->rtclock_timer.deadline)
241			etimer_set_deadline(deadline);
242	}
243	else
244		queue = &cpu_datap(master_cpu)->rtclock_timer.queue;
245
246    return (queue);
247}
248
249void
250timer_queue_cancel(
251    mpqueue_head_t  *queue,
252    uint64_t        deadline,
253    uint64_t        new_deadline)
254{
255    if (queue == &current_cpu_datap()->rtclock_timer.queue) {
256        if (deadline < new_deadline)
257            etimer_set_deadline(new_deadline);
258    }
259}
260
261/*
262 * etimer_queue_migrate() is called from the Power-Management kext
263 * when a logical processor goes idle (in a deep C-state) with a distant
 * deadline so that its timer queue can be moved to another processor.
265 * This target processor should be the least idle (most busy) --
266 * currently this is the primary processor for the calling thread's package.
267 * Locking restrictions demand that the target cpu must be the boot cpu.
268 */
269uint32_t
270etimer_queue_migrate(int target_cpu)
271{
272	cpu_data_t	*target_cdp = cpu_datap(target_cpu);
273	cpu_data_t	*cdp = current_cpu_datap();
274	int		ntimers_moved;
275
276	assert(!ml_get_interrupts_enabled());
277	assert(target_cpu != cdp->cpu_number);
278	assert(target_cpu == master_cpu);
279
280	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
281		DECR_TIMER_MIGRATE | DBG_FUNC_START,
282		target_cpu,
283		cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >>32),
284		0, 0);
285
286	/*
287	 * Move timer requests from the local queue to the target processor's.
288	 * The return value is the number of requests moved. If this is 0,
289	 * it indicates that the first (i.e. earliest) timer is earlier than
290	 * the earliest for the target processor. Since this would force a
291	 * resync, the move of this and all later requests is aborted.
292	 */
293	ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue,
294					    &target_cdp->rtclock_timer.queue);
295
296	/*
297	 * Assuming we moved stuff, clear local deadline.
298	 */
299	if (ntimers_moved > 0) {
300		cdp->rtclock_timer.deadline = EndOfAllTime;
301		setPop(EndOfAllTime);
302	}
303
304	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
305		DECR_TIMER_MIGRATE | DBG_FUNC_END,
306		target_cpu, ntimers_moved, 0, 0, 0);
307
308	return ntimers_moved;
309}
310