1/*
2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49 *  School of Computer Science
50 *  Carnegie Mellon University
51 *  Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57/*
58 */
59#ifdef	KERNEL_PRIVATE
60
61#ifndef _I386_MP_H_
62#define _I386_MP_H_
63
64//#define	MP_DEBUG 1
65
66#include <i386/apic.h>
67#include <i386/mp_events.h>
68
69#define MAX_CPUS	32		/* (8*sizeof(long)) */
70
71#ifndef	ASSEMBLER
72#include <stdint.h>
73#include <sys/cdefs.h>
74#include <mach/boolean.h>
75#include <mach/kern_return.h>
76#include <mach/i386/thread_status.h>
77#include <mach/vm_types.h>
78#include <kern/simple_lock.h>
79
80__BEGIN_DECLS
81
/* Bring the processor in the given slot out of reset; full init path. */
extern kern_return_t intel_startCPU(int slot_num);
/* Restart a previously-started processor, skipping full hardware init
 * (NOTE(review): presumably the wake/resume path — confirm against callers). */
extern kern_return_t intel_startCPU_fast(int slot_num);
/* C entry points executed by slave (non-boot) processors, normal and fast. */
extern void i386_init_slave(void);
extern void i386_init_slave_fast(void);
/* One-time multiprocessor bring-up. */
extern void smp_init(void);

/* Send an interprocessor interrupt to the given cpu. */
extern void cpu_interrupt(int cpu);
89__END_DECLS
90
91extern	unsigned int	real_ncpus;		/* real number of cpus */
92extern	unsigned int	max_ncpus;		/* max number of cpus */
93decl_simple_lock_data(extern,kdb_lock)	/* kdb lock		*/
94
95__BEGIN_DECLS
96
/* Console support: init and per-cpu console buffer alloc/free. */
extern  void	console_init(void);
extern	void	*console_cpu_alloc(boolean_t boot_cpu);
extern	void	console_cpu_free(void *console_buf);

extern	int	kdb_cpu;		/* current cpu running kdb	*/
extern	int	kdb_debug;
extern	int	kdb_active[];		/* per-cpu kdb-active flags */

/* Debugger/NMI coordination state, polled and set across cpus. */
extern	volatile boolean_t mp_kdp_trap;
extern 	volatile boolean_t force_immediate_debugger_NMI;
extern  volatile boolean_t pmap_tlb_flush_timeout;
extern  volatile usimple_lock_t spinlock_timed_out;	/* lock whose acquisition timed out */
extern  volatile uint32_t spinlock_owner_cpu;		/* cpu believed to own that lock */
extern  uint32_t spinlock_timeout_NMI(uintptr_t thread_addr);

extern	uint64_t	LastDebuggerEntryAllowance;

/* Enter/exit the kernel-debugger rendezvous (NOTE(review): presumably
 * quiesces the other cpus while the debugger runs — confirm in mp.c). */
extern	void	mp_kdp_enter(void);
extern	void	mp_kdp_exit(void);

/* TRUE if the debugger was entered recently (NOTE(review): presumably
 * within LastDebuggerEntryAllowance — confirm against the definition). */
extern	boolean_t	mp_recent_debugger_activity(void);
118
/*
 * All cpu rendezvous:
 * setup_func and teardown_func may be NULL; action_func runs on every cpu.
 * (NOTE(review): exact ordering/barrier semantics are not visible in this
 * header — confirm against the definition in mp.c.)
 */
extern void mp_rendezvous(
		void (*setup_func)(void *),
		void (*action_func)(void *),
		void (*teardown_func)(void *),
		void *arg);
/* Rendezvous with no setup/teardown (NOTE(review): the name suggests
 * action_func runs with interrupts disabled — confirm). */
extern void mp_rendezvous_no_intrs(
		void (*action_func)(void *),
		void *arg);
/* Force-release rendezvous locking, e.g. for debugger recovery. */
extern void mp_rendezvous_break_lock(void);

/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
		void (*action_func)(void *),
		void *arg);
140#if MACH_KDP
/* Signature of a function the kernel debugger runs on another logical cpu. */
typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu);

/* Invoke func(arg0, arg1, lcpu) on the given logical cpu on behalf of kdp
 * and return its result (NOTE(review): blocking behavior not visible here —
 * confirm against the definition). */
extern  long kdp_x86_xcpu_invoke(const uint16_t lcpu,
                                 kdp_x86_xcpu_func_t func,
                                 void *arg0, void *arg1);
/* Sentinel lcpu values: no cross-cpu call pending / target is current cpu. */
typedef enum	{KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t;
147#endif
148
typedef uint32_t cpu_t;
typedef volatile long cpumask_t;
/*
 * Convert a cpu number to a single-bit cpu mask.
 * Returns 0 for cpu numbers that don't fit in the 32-bit mask space.
 *
 * The shift is performed on an unsigned long: the original (1 << cpu)
 * shifted a signed int, which for cpu == 31 is undefined behavior (shift
 * into the sign bit) and, in practice on LP64, sign-extended the result
 * to 0xFFFFFFFF80000000 instead of 0x80000000 when widened to long.
 */
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
	return (cpu < 32) ? (cpumask_t)(1UL << cpu) : 0;
}
#define CPUMASK_ALL	0xffffffff
/* Mask with only the calling cpu's bit set; note cpu_number() is
 * (re)evaluated at each use of the macro. */
#define CPUMASK_SELF	cpu_to_cpumask(cpu_number())
/* All cpus except the calling cpu. */
#define CPUMASK_OTHERS	(CPUMASK_ALL & ~CPUMASK_SELF)
159
/* Initialization routine called at processor registration */
extern void mp_cpus_call_cpu_init(int cpu);
162
/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *	- ASYNC:  other cpus make their calls in parallel
 * 	- SYNC:   the calls are performed serially in logical cpu order
 * 	- NOSYNC: the calls are queued
 * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been
 * called on all specified cpus.
 * The return value is the number of cpus where the call was made or queued.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
		cpumask_t	cpus,
		mp_sync_t	mode,
		void		(*action_func)(void *),
		void		*arg);
/*
 * Two-argument variant of mp_cpus_call().  On return, *cpus_calledp and
 * *cpus_notcalledp hold the masks of cpus on which the call was / was not
 * made.  (NOTE(review): whether NULL may be passed for the out-parameters
 * is not visible in this header — confirm against the definition.)
 */
extern cpu_t mp_cpus_call1(
		cpumask_t	cpus,
		mp_sync_t	mode,
		void		(*action_func)(void *, void*),
		void		*arg0,
		void		*arg1,
		cpumask_t	*cpus_calledp,
		cpumask_t	*cpus_notcalledp);
188
/* Send an NMI interprocessor interrupt to each cpu in the mask. */
extern void mp_cpus_NMIPI(cpumask_t cpus);

/* Interrupt a set of cpus, forcing an exit out of non-root mode */
extern void mp_cpus_kick(cpumask_t cpus);
/*
 * Power-management-specific SPI to:
 *  - register a callout function, and
 *  - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);
200
201__END_DECLS
202
203#if MP_DEBUG
/* One entry in a per-cpu signal event log (MP_DEBUG builds only). */
typedef struct {
	uint64_t	time;		/* rdtsc64() timestamp (see DBGLOG) */
	int		cpu;		/* cpu argument recorded by DBGLOG */
	mp_event_t	event;		/* event type being logged */
} cpu_signal_event_t;

#define	LOG_NENTRIES	100
/* Fixed-size circular event log plus per-event-type counters. */
typedef struct {
	uint64_t		count[MP_LAST];		/* events seen, by type */
	int			next_entry;		/* next slot to (over)write */
	cpu_signal_event_t	entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

/* Per-cpu logs of signals sent (cpu_signal) and handled (cpu_handle). */
extern cpu_signal_event_log_t	*cpu_signal[];
extern cpu_signal_event_log_t	*cpu_handle[];
219
/*
 * Append one event to the current cpu's log.  'log' selects the log array
 * (cpu_signal or cpu_handle) and is indexed by cpu_number(); '_cpu' is the
 * cpu recorded in the entry.  Interrupts are disabled around the update so
 * the entry and cursor stay consistent on this cpu.
 * NOTE(review): macro arguments may be evaluated more than once — pass only
 * side-effect-free expressions.
 */
#define DBGLOG(log,_cpu,_event) {					\
	boolean_t		spl = ml_set_interrupts_enabled(FALSE);	\
	cpu_signal_event_log_t	*logp = log[cpu_number()];		\
	int			next = logp->next_entry;		\
	cpu_signal_event_t	*eventp = &logp->entry[next];		\
									\
	logp->count[_event]++;						\
									\
	eventp->time = rdtsc64();					\
	eventp->cpu = _cpu;						\
	eventp->event = _event;						\
	if (next == (LOG_NENTRIES - 1))					\
		logp->next_entry = 0;					\
	else								\
		logp->next_entry++;					\
									\
	(void) ml_set_interrupts_enabled(spl);				\
}
238
/*
 * Lazily allocate (first call only) and zero the signal and handler event
 * logs for the given cpu; panics if either allocation fails.
 */
#define DBGLOG_CPU_INIT(cpu)	{					\
	cpu_signal_event_log_t	**sig_logpp = &cpu_signal[cpu];		\
	cpu_signal_event_log_t	**hdl_logpp = &cpu_handle[cpu];		\
									\
	if (*sig_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) sig_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
	bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));		\
	if (*hdl_logpp == NULL &&					\
		kmem_alloc(kernel_map,					\
			(vm_offset_t *) hdl_logpp,			\
			sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\
		panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
	bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));		\
}
256#else	/* MP_DEBUG */
257#define DBGLOG(log,_cpu,_event)
258#define DBGLOG_CPU_INIT(cpu)
259#endif	/* MP_DEBUG */
260
261#endif	/* ASSEMBLER */
262
263#ifdef ASSEMBLER
264#define i_bit(bit, word)	((long)(*(word)) & (1L << (bit)))
265#else
/*
 * Return the value of bit 'bit' within 'word': the bit is left in place
 * (not shifted down), so the result is non-zero if set and 0 if clear.
 */
__attribute__((always_inline)) static inline long
i_bit_impl(long word, long bit) {
	return word & (1L << bit);
}
/* Test bit 'bit' of the long pointed to by 'word'. */
#define i_bit(bit, word)	i_bit_impl((long)(*(word)), bit)
272#endif
273
274#if	MACH_RT
275
#if   defined(__x86_64__)

/* Bump the per-cpu preemption-disable nesting count (assembly text macro;
 * C comments here are stripped by cpp before assembly). */
#define _DISABLE_PREEMPTION 					\
	incl	%gs:CPU_PREEMPTION_LEVEL

/* Drop the nesting count; if it reaches zero, check for pending preemption.
 * '9:' is a local numeric label; the decl sets ZF used by the jne. */
#define _ENABLE_PREEMPTION 					\
	decl	%gs:CPU_PREEMPTION_LEVEL		;	\
	jne	9f					;	\
	call	EXT(kernel_preempt_check)		;	\
9:

/* Drop the nesting count without checking for pending preemption. */
#define _ENABLE_PREEMPTION_NO_CHECK				\
	decl	%gs:CPU_PREEMPTION_LEVEL

#else
#error Unsupported architecture
#endif

/* x86_64 just calls through to the other macro directly */
#define DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK	_ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION		_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION		_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK 	_ENABLE_PREEMPTION_NO_CHECK
301
302#else	/* MACH_RT */
303#define DISABLE_PREEMPTION
304#define ENABLE_PREEMPTION
305#define ENABLE_PREEMPTION_NO_CHECK
306#define MP_DISABLE_PREEMPTION
307#define MP_ENABLE_PREEMPTION
308#define MP_ENABLE_PREEMPTION_NO_CHECK
309#endif	/* MACH_RT */
310
311#endif /* _I386_MP_H_ */
312
313#endif /* KERNEL_PRIVATE */
314