/* smp.h revision 209695 */
/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/include/smp.h 209695 2010-07-04 12:43:12Z marius $
 */

#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

#ifdef SMP

/*
 * States of the AP startup handshake, advanced through csa_state in
 * struct cpu_start_args below.
 */
#define	CPU_TICKSYNC		1
#define	CPU_STICKSYNC		2
#define	CPU_INIT		3
#define	CPU_BOOTSTRAP		4

#ifndef	LOCORE

#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>

/*
 * Interrupt dispatch status bits and field shifts.
 * NOTE(review): the names suggest the sun4u interrupt dispatch
 * status/data registers, with Cheetah/Jalapeno busy-nack pair
 * layouts -- confirm against the UltraSPARC manuals.
 */
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24

/* IPI levels map directly onto interrupt priority levels (PILs). */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

/* Retry limit for IPI dispatch; consumed by the MD IPI send code. */
#define	IPI_RETRIES	5000
/*
 * Argument area used to hand information to an application processor
 * while it is being started; csa_state steps through the CPU_*
 * handshake states defined above.
 */
struct cpu_start_args {
	u_int	csa_count;	/* startup rendezvous counter */
	u_int	csa_mid;	/* identifier ("mid") of the CPU to start */
	u_int	csa_state;	/* handshake state (CPU_* above) */
	vm_offset_t csa_pcpu;	/* per-CPU data area for the new CPU */
	u_long	csa_tick;	/* %tick value for clock sync (CPU_TICKSYNC) */
	u_long	csa_stick;	/* %stick value for clock sync (CPU_STICKSYNC) */
	u_long	csa_ver;	/* version register of the new CPU */
	struct	tte csa_ttes[PCPU_PAGES];	/* TTEs for the per-CPU pages */
};

/* Arguments handed to the cache invalidation IPI handlers. */
struct ipi_cache_args {
	u_int	ica_mask;	/* CPUs that have yet to acknowledge */
	vm_paddr_t ica_pa;	/* physical address of the target page */
};

/* Arguments handed to the TLB demap IPI handlers. */
struct ipi_tlb_args {
	u_int	ita_mask;	/* CPUs that have yet to acknowledge */
	struct	pmap *ita_pmap;	/* pmap the demap applies to */
	u_long	ita_start;	/* start VA (or the sole VA, see ita_va) */
	u_long	ita_end;	/* end VA of the range */
};
/* Single-page demaps reuse ita_start as the single virtual address. */
#define	ita_va	ita_start

struct pcpu;

/* PCBs saved for stopped CPUs -- presumably indexed by cpuid; verify. */
extern struct pcb stoppcbs[];

void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

/*
 * MD IPI dispatch routine, taking a CPU mask and up to three data
 * words; a function pointer, presumably selected at startup to match
 * the CPU implementation (see mp_init()).
 */
typedef	void cpu_ipi_selected_t(u_int, u_long, u_long, u_long);
extern	cpu_ipi_selected_t *cpu_ipi_selected;

void	mp_init(u_int cpu_impl);

/* Shared IPI argument areas; senders below serialize on ipi_mtx. */
extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_tlb_args ipi_tlb_args;

/* AP startup trampoline code and its patchable parameters. */
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tlb_slots;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

/* Trap-level IPI handler entry points (char[]: taken by address only). */
extern	char tl_ipi_cheetah_dcache_page_inval[];
extern	char tl_ipi_spitfire_dcache_page_inval[];
extern	char tl_ipi_spitfire_icache_page_inval[];

extern	char tl_ipi_level[];
extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];

/*
 * Send the given IPI level to every CPU but the caller, via the
 * generic trap-level handler tl_ipi_level.
 */
static __inline void
ipi_all_but_self(u_int ipi)
{

	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)tl_ipi_level, ipi);
}

/*
 * Send the given IPI level to the CPUs named in the mask cpus, via the
 * generic trap-level handler tl_ipi_level.
 */
static __inline void
ipi_selected(u_int cpus, u_int ipi)
{

	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}

/*
 * The senders below need the pmap and mutex definitions, which not
 * every consumer of this header pulls in; expose them only when both
 * are available.
 */
#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

/*
 * Request a D-cache invalidation of the page at physical address pa on
 * all other CPUs, using the trap-level handler func.  Returns a cookie
 * for ipi_wait(), or NULL when no IPI is needed (single CPU).  On a
 * non-NULL return the thread is left pinned and ipi_mtx is left held;
 * ipi_wait() releases both.
 */
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	/* all_cpus includes us; ipi_wait() clears our own bit. */
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

/*
 * Request an I-cache invalidation of the page at physical address pa on
 * all other CPUs, using the trap-level handler func.  Same contract as
 * ipi_dcache_page_inval(): a non-NULL return leaves the thread pinned
 * and ipi_mtx held until ipi_wait().
 */
static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	/* all_cpus includes us; ipi_wait() clears our own bit. */
	ica->ica_mask = all_cpus;
	ica->ica_pa = pa;
	cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

/*
 * Request a TLB context demap for pmap pm on every other CPU the pmap
 * is active on.  Returns a cookie for ipi_wait(), or NULL when no
 * remote CPU needs the demap.  On a non-NULL return the thread is left
 * pinned and ipi_mtx held; ipi_wait() releases both.
 */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() has something to clear. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

/*
 * Request a TLB demap of the single page at va in pmap pm on every
 * other CPU the pmap is active on.  Returns a cookie for ipi_wait(),
 * or NULL when no remote CPU needs the demap.  On a non-NULL return
 * the thread is left pinned and ipi_mtx held; ipi_wait() releases
 * both.
 */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() has something to clear. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}

/*
 * Request a TLB demap of the VA range [start, end) in pmap pm on every
 * other CPU the pmap is active on.  Returns a cookie for ipi_wait(),
 * or NULL when no remote CPU needs the demap.  On a non-NULL return
 * the thread is left pinned and ipi_mtx held; ipi_wait() releases
 * both.
 */
static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	u_int cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	if ((cpus = (pm->pm_active & PCPU_GET(other_cpus))) == 0) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	/* Include our own bit so ipi_wait() has something to clear. */
	ita->ita_mask = cpus | PCPU_GET(cpumask);
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap, (u_long)ita);
	return (&ita->ita_mask);
}

/*
 * Complete an IPI begun by one of the senders above: clear our own bit
 * in the acknowledgement mask the cookie points at, spin until every
 * targeted CPU has cleared its bit, then release ipi_mtx and unpin the
 * thread (both acquired by the sender).  A NULL cookie means no IPI
 * was sent and this is a no-op.
 */
static __inline void
ipi_wait(void *cookie)
{
	volatile u_int *mask;

	if ((mask = cookie) != NULL) {
		atomic_clear_int(mask, PCPU_GET(cpumask));
		while (*mask != 0)
			;
		mtx_unlock_spin(&ipi_mtx);
		sched_unpin();
	}
}

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#endif /* !LOCORE */

#else

/* UP (!SMP) stubs: all IPI operations collapse to no-ops. */
#ifndef	LOCORE

259static __inline void *
260ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
261{
262
263	return (NULL);
264}
265
266static __inline void *
267ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
268{
269
270	return (NULL);
271}
272
273static __inline void *
274ipi_tlb_context_demap(struct pmap *pm __unused)
275{
276
277	return (NULL);
278}
279
280static __inline void *
281ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
282{
283
284	return (NULL);
285}
286
287static __inline void *
288ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
289    __unused vm_offset_t end)
290{
291
292	return (NULL);
293}
294
295static __inline void
296ipi_wait(void *cookie)
297{
298
299}
300
/* UP placeholder for the SMP trap-level handler of the same name. */
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{
}

/* UP placeholder for the SMP trap-level handler of the same name. */
static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{
}

/* UP placeholder for the SMP trap-level handler of the same name. */
static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{
}

#endif /* !LOCORE */

#endif /* SMP */

#endif /* !_MACHINE_SMP_H_ */
