/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	This file contains routines to check whether an AST (asynchronous
 *	system trap) is needed.
 *
 *	ast_taken() - handle the ASTs pending for the current thread.
 *
 *	ast_check() - check whether an AST is needed for an interrupt or
 *	context switch.  Usually called by the clock interrupt handler.
 */

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <kern/wait_queue.h>
#include <kern/ledger.h>
#include <mach/policy.h>
#include <machine/trap.h> // for CHUD AST hook
#include <machine/pal_routines.h>
#include <security/mac_mach_internal.h> // for MACF AST hook

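/*
 * Performance-tools (CHUD) AST callback.  When non-NULL it is invoked in
 * ast_taken() for every AST, including those taken by idle processor
 * threads.  (Assumed to be installed and cleared by the CHUD support
 * code; this file only reads it.)
 */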
volatile perfASTCallback perfASTHook;


void
ast_init(void)
{
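	/* Nothing to do: there is no global AST state to initialize. */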
}

extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...

/*
 * ast_taken - handle the pending ASTs requested in 'reasons' for the
 * current thread.  'enable' is the interrupt-enable state to apply while
 * the handlers run and to restore before returning.
 *
 * Called at splsched.
 */
void
ast_taken(
	ast_t		reasons,
	boolean_t	enable
)
{
	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
	ast_t			*myast = ast_pending();
	thread_t		thread = current_thread();
	perfASTCallback	perf_hook = perfASTHook;

	/*
	 * CHUD hook - all threads including idle processor threads
	 */
	if (perf_hook) {
		if (*myast & AST_CHUD_ALL) {
			(*perf_hook)(reasons, myast);

			if (*myast == AST_NONE)
				return;
		}
	}
	else
		*myast &= ~AST_CHUD_ALL;

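	/*
	 * Limit 'reasons' to the ASTs actually pending on this processor,
	 * and clear those bits from the pending mask now that they are
	 * being handled.
	 */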
	reasons &= *myast;
	*myast &= ~reasons;

	/*
	 * Handle ASTs for all threads
	 * except idle processor threads.
	 */
	if (!(thread->state & TH_IDLE)) {
		/*
		 * Check for urgent preemption, but never preempt a thread
		 * that is part-way through asserting a wait.
		 */
		if (	(reasons & AST_URGENT)				&&
				wait_queue_assert_possible(thread)		) {
			if (reasons & AST_PREEMPT) {
				counter(c_ast_taken_block++);
				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
										reasons & AST_PREEMPTION);
			}

			reasons &= ~AST_PREEMPTION;
		}

		/*
		 * The kernel preempt traps
		 * skip all other ASTs.
		 */
		if (!preempt_trap) {
			ml_set_interrupts_enabled(enable);

#ifdef	MACH_BSD
			/*
			 * Handle BSD hook.
			 */
			if (reasons & AST_BSD) {
				thread_ast_clear(thread, AST_BSD);
				bsd_ast(thread);
			}
#endif
#if CONFIG_MACF
			/*
			 * Handle MACF hook.
			 */
			if (reasons & AST_MACF) {
				thread_ast_clear(thread, AST_MACF);
				mac_thread_userret(thread);
			}
#endif
			/*
			 * Thread APC hook.
			 */
			if (reasons & AST_APC)
				act_execute_returnhandlers();

			if (reasons & AST_GUARD) {
				thread_ast_clear(thread, AST_GUARD);
				guard_ast(thread);
			}

			if (reasons & AST_LEDGER) {
				thread_ast_clear(thread, AST_LEDGER);
				ledger_ast(thread);
			}

			/*
			 * Kernel Profiling Hook
			 */
			if (reasons & AST_KPERF) {
				thread_ast_clear(thread, AST_KPERF);
				chudxnu_thread_ast(thread);
			}

#if CONFIG_TELEMETRY
			if (reasons & AST_TELEMETRY_ALL) {
				boolean_t interrupted_userspace = FALSE;
				boolean_t is_windowed = FALSE;

				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
				thread_ast_clear(thread, AST_TELEMETRY_ALL);
				telemetry_ast(thread, interrupted_userspace, is_windowed);
			}
#endif

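			/*
			 * Return to splsched (interrupts off) for the SFI and
			 * final preemption checks below.
			 */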
			ml_set_interrupts_enabled(FALSE);

			if (reasons & AST_SFI) {
				sfi_ast(thread);
			}

			/*
			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
			 */
			thread_lock(thread);
			if (reasons & AST_PREEMPT)
				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
			thread_unlock(thread);

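			/*
			 * If preemption is still needed, block here.  Because
			 * thread_block_reason() is passed thread_exception_return
			 * as a continuation, a thread preempted at this point
			 * resumes in thread_exception_return() rather than
			 * returning through this frame.
			 */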
			if (	(reasons & AST_PREEMPT)				&&
					wait_queue_assert_possible(thread)		) {
				counter(c_ast_taken_block++);
				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
			}
		}
	}

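	/* Restore the interrupt state requested by the caller. */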
	ml_set_interrupts_enabled(enable);
}

/*
 * ast_check - examine the thread running on 'processor', propagate its
 * pending ASTs to the processor, and request a preemption AST if a
 * context switch is needed.
 *
 * Called at splsched.
 */
void
ast_check(
	processor_t		processor)
{
	thread_t			thread = processor->active_thread;

	if (	processor->state == PROCESSOR_RUNNING		||
			processor->state == PROCESSOR_SHUTDOWN		) {
		ast_t			preempt;

		/*
		 *	Propagate thread ast to processor.
		 */
		pal_ast_check(thread);

		ast_propagate(thread->ast);

		/*
		 *	Context switch check.
		 */
		thread_lock(thread);

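		/*
		 * Refresh the processor's cached view of the running thread's
		 * scheduling state (priority, scheduling mode, SFI class).
		 */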
		processor->current_pri = thread->sched_pri;
		processor->current_thmode = thread->sched_mode;
		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);

		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
			ast_on(preempt);
		thread_unlock(thread);
	}
}