1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 */
59#ifdef	KERNEL_PRIVATE
60
61#ifndef _I386_MP_H_
62#define _I386_MP_H_
63
64//#define	MP_DEBUG 1
65
66#include <i386/apic.h>
67#include <i386/mp_events.h>
68
69#define MAX_CPUS	32		/* (8*sizeof(long)) */
70
71#ifndef	ASSEMBLER
72#include <stdint.h>
73#include <sys/cdefs.h>
74#include <mach/boolean.h>
75#include <mach/kern_return.h>
76#include <mach/i386/thread_status.h>
77#include <mach/vm_types.h>
78#include <kern/lock.h>
79
80__BEGIN_DECLS
81
82extern kern_return_t intel_startCPU(int slot_num);
83extern kern_return_t intel_startCPU_fast(int slot_num);
84extern void i386_init_slave(void);
85extern void i386_init_slave_fast(void);
86extern void smp_init(void);
87
88extern void cpu_interrupt(int cpu);
89__END_DECLS
90
91extern	unsigned int	real_ncpus;		/* real number of cpus */
92extern	unsigned int	max_ncpus;		/* max number of cpus */
93decl_simple_lock_data(extern,kdb_lock)	/* kdb lock		*/
94
95__BEGIN_DECLS
96
97extern  void	console_init(void);
98extern	void	*console_cpu_alloc(boolean_t boot_cpu);
99extern	void	console_cpu_free(void *console_buf);
100
101extern	int	kdb_cpu;		/* current cpu running kdb	*/
102extern	int	kdb_debug;
103extern	int	kdb_active[];
104
105extern	volatile boolean_t mp_kdp_trap;
106extern 	volatile boolean_t force_immediate_debugger_NMI;
107extern  volatile boolean_t pmap_tlb_flush_timeout;
108extern  volatile usimple_lock_t spinlock_timed_out;
109extern  volatile uint32_t spinlock_owner_cpu;
110
111extern	uint64_t	LastDebuggerEntryAllowance;
112
113extern	void	mp_kdp_enter(void);
114extern	void	mp_kdp_exit(void);
115
116extern	boolean_t	mp_recent_debugger_activity(void);
117
118/*
119 * All cpu rendezvous:
120 */
121extern void mp_rendezvous(
122		void (*setup_func)(void *),
123		void (*action_func)(void *),
124		void (*teardown_func)(void *),
125		void *arg);
126extern void mp_rendezvous_no_intrs(
127		void (*action_func)(void *),
128		void *arg);
129extern void mp_rendezvous_break_lock(void);
130
131/*
132 * All cpu broadcast.
133 * Called from thread context, this blocks until all active cpus have
134 * run action_func:
135 */
136extern void mp_broadcast(
137		void (*action_func)(void *),
138		void *arg);
139#if MACH_KDP
140typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);
141
142extern  long kdp_x86_xcpu_invoke(const uint16_t lcpu,
143                                 kdp_x86_xcpu_func_t func,
144                                 void *arg0, void *arg1);
145typedef enum	{KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
146#endif
147
typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;

/*
 * Convert a logical cpu number into a single-bit cpu mask.
 * Returns the empty mask (0) for cpu numbers that do not fit in the
 * 32-bit mask (cpu >= 32, i.e. >= MAX_CPUS).
 */
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	/*
	 * Shift an unsigned one: (1 << 31) on a signed int is undefined
	 * behavior in C; shifting a cpumask_t is well-defined for all
	 * cpu values 0..31.
	 */
	return (cpu < 32) ? ((cpumask_t)1 << cpu) : 0;
}
155#define CPUMASK_ALL	0xffffffff
156#define CPUMASK_SELF	cpu_to_cpumask(cpu_number())
157#define CPUMASK_OTHERS	(CPUMASK_ALL & ~CPUMASK_SELF)
158
159/*
160 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
161 * The mask may include the local cpu.
162 * If the mode is:
163 *	- ASYNC:  other cpus make their calls in parallel
164 * 	- SYNC:   the calls are performed serially in logical cpu order
165 * 	- NOSYNC: the calls are queued
166 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
167 * called on all specified cpus.
168 * The return value is the number of cpus where the call was made or queued.
169 * The action function is called with interrupts disabled.
170 */
171extern cpu_t mp_cpus_call(
172		cpumask_t	cpus,
173		mp_sync_t	mode,
174		void		(*action_func)(void *),
175		void		*arg);
176extern cpu_t mp_cpus_call1(
177		cpumask_t	cpus,
178		mp_sync_t	mode,
179		void		(*action_func)(void *, void*),
180		void		*arg0,
181		void		*arg1,
182		cpumask_t	*cpus_calledp,
183		cpumask_t	*cpus_notcalledp);
184
185/*
186 * Power-management-specific SPI to:
187 *  - register a callout function, and
188 *  - request the callout (if registered) on a given cpu.
189 */
190extern void PM_interrupt_register(void (*fn)(void));
191extern void cpu_PM_interrupt(int cpu);
192
193__END_DECLS
194
195#if MP_DEBUG
/*
 * One logged cross-cpu signal event: a timestamped record of which
 * cpu was involved in which mp_event_t (see DBGLOG below).
 */
typedef struct {
	uint64_t	time;	/* rdtsc64() timestamp when logged */
	int		cpu;	/* the other endpoint cpu of the signal */
	mp_event_t	event;	/* which cross-cpu event was sent/handled */
} cpu_signal_event_t;

#define	LOG_NENTRIES	100	/* ring-buffer capacity, entries per cpu */
typedef struct {
	uint64_t		count[MP_LAST];		/* per-event-type running totals */
	int			next_entry;		/* next slot to fill; wraps at LOG_NENTRIES */
	cpu_signal_event_t	entry[LOG_NENTRIES];	/* circular log of recent events */
} cpu_signal_event_log_t;
208
209extern cpu_signal_event_log_t	*cpu_signal[];
210extern cpu_signal_event_log_t	*cpu_handle[];
211
/*
 * Log a cross-cpu signal event into the current cpu's ring buffer.
 *   log:    array of per-cpu log pointers (cpu_signal[] or cpu_handle[]),
 *           indexed here by cpu_number(), i.e. the cpu executing the macro.
 *   _cpu:   the other endpoint of the signal, recorded in the entry.
 *   _event: the mp_event_t being signalled or handled.
 * Interrupts are disabled around the update so an interrupt on this cpu
 * cannot re-enter and corrupt the per-cpu log.
 * Wrapped in do { } while (0) so the macro is a single statement and is
 * safe inside an unbraced if/else.
 */
#define DBGLOG(log,_cpu,_event) do {					\
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int			next = logp->next_entry;		\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu = _cpu;						\
	eventp->event = _event;						\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
} while (0)
230
/*
 * Lazily allocate and zero the given cpu's signal and handler event logs.
 * Panics if kernel memory cannot be allocated.
 * Note: the buffers are re-zeroed on every call, even when they were
 * already allocated, so a re-init clears prior history.
 * Wrapped in do { } while (0) so the macro is a single statement and is
 * safe inside an unbraced if/else.
 */
#define DBGLOG_CPU_INIT(cpu)	do {					\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) sig_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) hdl_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
} while (0)
248#else	/* MP_DEBUG */
249#define DBGLOG(log,_cpu,_event)
250#define DBGLOG_CPU_INIT(cpu)
251#endif	/* MP_DEBUG */
252
253#endif	/* ASSEMBLER */
254
255#ifdef ASSEMBLER
256#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
257#else
/*
 * Test bit `bit` of `word`: returns the bit's positional value
 * (nonzero) when set, 0 when clear.
 */
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit) {
	return word & (1L << bit);
}
263#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
264#endif
265
#if	MACH_RT

/*
 * Preemption-level assembly macros.  These expand in assembly sources
 * (run through cpp); CPU_PREEMPTION_LEVEL is a per-cpu count addressed
 * via %gs, incremented to disable preemption and decremented to
 * re-enable it.  The `9:`/`jne 9f` pairs are local assembler labels.
 */
#if defined(__i386__)

/* Bump the preemption-disable count; no function call, no registers clobbered. */
#define _DISABLE_PREEMPTION 					\
	incl	%gs:CPU_PREEMPTION_LEVEL

/*
 * Drop the count; when it reaches zero (decl sets ZF), call
 * kernel_preempt_check().  Caller-saved registers eax/ecx/edx are
 * preserved around the call since this expands inside arbitrary asm.
 */
#define _ENABLE_PREEMPTION 					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	pushl	%eax					;	\
	pushl	%ecx					;	\
	pushl	%edx					;	\
	call	EXT(kernel_preempt_check)		;	\
	popl	%edx					;	\
	popl	%ecx					;	\
	popl	%eax					;	\
9:

/* Drop the count without checking for a pending preemption. */
#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#elif defined(__x86_64__)

/* Bump the preemption-disable count; no function call, no registers clobbered. */
#define _DISABLE_PREEMPTION 					\
	incl	%gs:CPU_PREEMPTION_LEVEL

/*
 * Drop the count; when it reaches zero, call kernel_preempt_check().
 * No register save/restore here — x86_64 call sites are expected to
 * tolerate the call's clobbers (NOTE(review): confirm against callers).
 */
#define _ENABLE_PREEMPTION 					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	call	EXT(kernel_preempt_check)		;	\
9:

/* Drop the count without checking for a pending preemption. */
#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
/*
 * With MACH_ASSERT on i386, route through the C functions
 * (_disable_preemption et al.) so assertions run; eax/ecx/edx are
 * saved and restored around each call.
 */
#if	MACH_ASSERT && defined(__i386__)
#define DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_DISABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_disable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION					\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption);			\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#define MP_ENABLE_PREEMPTION_NO_CHECK				\
	pushl	%eax;						\
	pushl	%ecx;						\
	pushl	%edx;						\
	call	EXT(_mp_enable_preemption_no_check);		\
	popl	%edx;						\
	popl	%ecx;						\
	popl	%eax
#else	/* MACH_ASSERT */
/* Non-assert builds use the inline inc/dec macros directly. */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK 	_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_ASSERT */

#else	/* MACH_RT */
/* Without MACH_RT there is no preemption accounting: all no-ops. */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif	/* MACH_RT */
373
374#endif /* _I386_MP_H_ */
375
376#endif /* KERNEL_PRIVATE */
377