/* smp.h -- FreeBSD head, revision 210601 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/include/smp.h 210601 2010-07-29 12:08:46Z mav $
 */
28
#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

#ifdef SMP

/*
 * States of the csa_state handshake word used while bootstrapping a
 * secondary CPU (see struct cpu_start_args below).
 */
#define	CPU_TICKSYNC		1
#define	CPU_STICKSYNC		2
#define	CPU_INIT		3
#define	CPU_BOOTSTRAP		4

#ifndef	LOCORE

#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>
47
/*
 * Interrupt dispatch status register bits and per-implementation
 * limits for the number of busy/nack bit pairs.
 * NOTE(review): exact hardware semantics come from the UltraSPARC
 * (Cheetah/Jalapeno) manuals -- confirm against the cpu_ipi_selected
 * implementations before relying on them.
 */
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

/* Field shifts for the interrupt dispatch control register. */
#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24

/*
 * IPIs map directly onto processor interrupt levels (PILs).  There is
 * no separate hard-stop mechanism on this platform, so IPI_STOP_HARD
 * aliases IPI_STOP.
 */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_HARDCLOCK	PIL_HARDCLOCK
#define	IPI_STATCLOCK	PIL_STATCLOCK
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

/* Number of attempts when an interrupt dispatch is busy or nacked. */
#define	IPI_RETRIES	5000
67
/*
 * Argument area shared between the BSP and a starting secondary CPU;
 * csa_state drives the startup handshake through the CPU_* states
 * defined above.
 */
struct cpu_start_args {
	u_int	csa_count;	/* rendezvous counter */
	u_int	csa_mid;	/* module ID of the target CPU */
	u_int	csa_state;	/* handshake state (CPU_*) */
	vm_offset_t csa_pcpu;	/* per-CPU area address for the new CPU */
	u_long	csa_tick;	/* tick value (CPU_TICKSYNC) */
	u_long	csa_stick;	/* system tick value (CPU_STICKSYNC) */
	u_long	csa_ver;	/* version register contents -- TODO confirm */
	struct	tte csa_ttes[PCPU_PAGES];	/* mappings for the per-CPU pages */
};
78
/*
 * Argument area for cache-invalidation IPIs; shared, protected by
 * ipi_mtx (see ipi_dcache_page_inval()/ipi_icache_page_inval()).
 */
struct ipi_cache_args {
	u_int	ica_mask;	/* CPUs that have yet to acknowledge */
	vm_paddr_t ica_pa;	/* physical address of the target page */
};
83
/*
 * Argument area for TLB-demap IPIs; shared, protected by ipi_mtx
 * (see the ipi_tlb_*_demap() functions below).
 */
struct ipi_tlb_args {
	u_int	ita_mask;	/* CPUs that have yet to acknowledge */
	struct	pmap *ita_pmap;	/* pmap whose mappings are demapped */
	u_long	ita_start;	/* start of the VA range */
	u_long	ita_end;	/* end of the VA range */
};
/* Single-page demaps reuse ita_start as the sole virtual address. */
#define	ita_va	ita_start
91
/* Forward declaration; only pointers to struct pcpu are used here. */
struct pcpu;

/* PCBs saved for CPUs halted via IPI_STOP (one slot per CPU). */
extern struct pcb stoppcbs[];

void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

/*
 * Low-level IPI dispatch routine; presumably chosen at boot based on
 * the CPU implementation passed to mp_init() -- confirm in mp_machdep.c.
 */
typedef	void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
extern	cpu_ipi_selected_t *cpu_ipi_selected;

void	mp_init(u_int cpu_impl);

/* Shared IPI argument areas above, serialized by ipi_mtx. */
extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_tlb_args ipi_tlb_args;

/* Trampoline used to start secondary CPUs. */
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tlb_slots;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

/* Trap-level IPI handlers (assembly entry points). */
extern	char tl_ipi_cheetah_dcache_page_inval[];
extern	char tl_ipi_spitfire_dcache_page_inval[];
extern	char tl_ipi_spitfire_icache_page_inval[];

extern	char tl_ipi_level[];
extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];
123
124static __inline void
125ipi_all_but_self(u_int ipi)
126{
127
128	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
129}
130
131static __inline void
132ipi_selected(u_int cpus, u_int ipi)
133{
134
135	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
136}
137
#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

/*
 * Ask all other CPUs to invalidate the D-cache lines of the physical
 * page at "pa", using the trap-level handler "func" (one of the
 * tl_ipi_*_dcache_page_inval entry points).  Returns a cookie to pass
 * to ipi_wait(), or NULL if no IPI was sent.  On a non-NULL return
 * the thread is pinned and ipi_mtx is held; both are released by
 * ipi_wait().
 */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	/* Nothing to do with a single CPU running. */
	if (smp_cpus == 1)
		return (NULL);
	/* Pin so PCPU values stay valid until ipi_wait() unpins. */
	sched_pin();
	ica = &ipi_cache_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
155
/*
 * Ask all other CPUs to invalidate the I-cache lines of the physical
 * page at "pa", using the trap-level handler "func".  Returns a
 * cookie to pass to ipi_wait(), or NULL if no IPI was sent.  On a
 * non-NULL return the thread is pinned and ipi_mtx is held; both are
 * released by ipi_wait().
 */
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	/* Nothing to do with a single CPU running. */
	if (smp_cpus == 1)
		return (NULL);
	/* Pin so PCPU values stay valid until ipi_wait() unpins. */
	sched_pin();
	ica = &ipi_cache_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}
171
/*
 * Demap the TLB context of pmap "pm" on every other CPU where the
 * pmap is active.  Returns a cookie for ipi_wait(), or NULL if no IPI
 * was needed (uniprocessor, or pmap not active elsewhere).  On a
 * non-NULL return the thread is pinned and ipi_mtx is held; both are
 * released by ipi_wait().
 */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	/* Only other CPUs that have this pmap active need the demap. */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() can clear it. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}
193
/*
 * Demap the page at virtual address "va" in pmap "pm" on every other
 * CPU where the pmap is active.  Returns a cookie for ipi_wait(), or
 * NULL if no IPI was needed.  On a non-NULL return the thread is
 * pinned and ipi_mtx is held; both are released by ipi_wait().
 */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	/* Only other CPUs that have this pmap active need the demap. */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() can clear it. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}
215
/*
 * Demap the virtual address range [start, end) of pmap "pm" on every
 * other CPU where the pmap is active.  Returns a cookie for
 * ipi_wait(), or NULL if no IPI was needed.  On a non-NULL return the
 * thread is pinned and ipi_mtx is held; both are released by
 * ipi_wait().
 */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	/* Only other CPUs that have this pmap active need the demap. */
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	/* ipi_mtx serializes use of the shared argument area. */
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() can clear it. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}
238
/*
 * Wait for an IPI started by one of the functions above to complete:
 * clear our own bit in the acknowledgement mask, spin until every
 * target CPU has cleared its bit (presumably done from the tl_ipi_*
 * handlers -- confirm in the assembly), then drop ipi_mtx and unpin
 * the thread.  A NULL cookie means no IPI was sent and is a no-op.
 */
static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		atomic_clear_int(mask, PCPU_GET(cpumask));
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);
		sched_unpin();
	}
}

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#endif /* !LOCORE */
256
#else

#ifndef	LOCORE

/* UP stub: no other CPUs, so no IPI is sent and no cookie is needed. */
static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
267
/* UP stub: no other CPUs, so no IPI is sent and no cookie is needed. */
static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
274
/* UP stub: no other CPUs, so no IPI is sent and no cookie is needed. */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

	return (NULL);
}
281
/* UP stub: no other CPUs, so no IPI is sent and no cookie is needed. */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

	return (NULL);
}
288
/* UP stub: no other CPUs, so no IPI is sent and no cookie is needed. */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
    __unused vm_offset_t end)
{

	return (NULL);
}
296
/* UP stub: nothing to wait for since no IPI is ever sent. */
static __inline void
ipi_wait(void *cookie)
{

}
302
/* UP stub standing in for the SMP trap-level handler of this name. */
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}
308
/* UP stub standing in for the SMP trap-level handler of this name. */
static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}
314
/* UP stub standing in for the SMP trap-level handler of this name. */
static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}

#endif /* !LOCORE */

#endif /* SMP */

#endif /* !_MACHINE_SMP_H_ */
326