/*-
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/include/smp.h 239864 2012-08-29 16:56:50Z marius $
 */

#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

#ifdef SMP

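/*
 * Per-CPU startup states; the boot processor and a starting AP step
 * csa_state in struct cpu_start_args (below) through these values while
 * handshaking.
 */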
#define	CPU_TICKSYNC		1
#define	CPU_STICKSYNC		2
#define	CPU_INIT		3
#define	CPU_BOOTSTRAP		4

#ifndef	LOCORE

#include <sys/cpuset.h>
#include <sys/proc.h>
#include <sys/sched.h>

#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/tte.h>

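/*
 * Interrupt dispatch status register bits polled while sending an IPI;
 * Cheetah-class CPUs provide multiple busy/nack bit pairs.
 */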
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24

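/* Machine-independent IPI types map directly to processor interrupt levels. */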
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_HARDCLOCK	PIL_HARDCLOCK
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

#define	IPI_RETRIES	5000

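/*
 * Startup arguments shared between the boot processor and a starting AP;
 * csa_ttes holds the TTEs used to map the AP's per-CPU pages.
 */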
struct cpu_start_args {
	u_int	csa_count;
	u_int	csa_mid;
	u_int	csa_state;
	vm_offset_t csa_pcpu;
	u_long	csa_tick;
	u_long	csa_stick;
	u_long	csa_ver;
	struct	tte csa_ttes[PCPU_PAGES];
};

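/*
 * Argument blocks handed to the trap-level IPI handlers; each target CPU
 * clears its bit from the mask to signal completion (see ipi_wait()).
 */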
struct ipi_cache_args {
	cpuset_t ica_mask;
	vm_paddr_t ica_pa;
};

struct ipi_rd_args {
	cpuset_t ira_mask;
	register_t *ira_val;
};

struct ipi_tlb_args {
	cpuset_t ita_mask;
	struct	pmap *ita_pmap;
	u_long	ita_start;
	u_long	ita_end;
};
#define	ita_va	ita_start

struct pcpu;

extern struct pcb stoppcbs[];

void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

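/*
 * Low-level IPI send routines; mp_init() selects the implementation
 * appropriate to the given cpu_impl.
 */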
typedef	void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
extern	cpu_ipi_selected_t *cpu_ipi_selected;
typedef	void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
extern	cpu_ipi_single_t *cpu_ipi_single;

void	mp_init(u_int cpu_impl);

extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_rd_args ipi_rd_args;
extern	struct ipi_tlb_args ipi_tlb_args;

extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tlb_slots;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

extern	char tl_ipi_cheetah_dcache_page_inval[];
extern	char tl_ipi_spitfire_dcache_page_inval[];
extern	char tl_ipi_spitfire_icache_page_inval[];

extern	char tl_ipi_level[];

extern	char tl_ipi_stick_rd[];
extern	char tl_ipi_tick_rd[];

extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];

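/*
 * Send a plain level IPI, serviced by tl_ipi_level on the target CPU(s);
 * the ipi argument is one of the IPI_* levels defined above.
 */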
static __inline void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}

static __inline void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
}

static __inline void
ipi_cpu(int cpu, u_int ipi)
{

	cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
}

#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

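/*
 * The helpers below return a cookie (a pointer to the argument mask) to be
 * handed to ipi_wait(); until then the caller stays pinned and, except for
 * ipi_rd(), holds smp_ipi_mtx.  Illustrative sketch only (the real callers
 * live in the cache and pmap code):
 *
 *	void *cookie;
 *
 *	cookie = ipi_dcache_page_inval(tl_ipi_cheetah_dcache_page_inval, pa);
 *	... invalidate the local D-cache lines for pa ...
 *	ipi_wait(cookie);
 */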
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&smp_ipi_mtx);
	ica->ica_mask = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
	ica->ica_pa = pa;
	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&smp_ipi_mtx);
	ica->ica_mask = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
	ica->ica_pa = pa;
	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

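/*
 * Read a register such as %tick or %stick on a single remote CPU (via
 * tl_ipi_tick_rd or tl_ipi_stick_rd); smp_ipi_mtx is not taken here, so
 * the returned cookie is intended for ipi_wait_unlocked().
 */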
static __inline void *
ipi_rd(u_int cpu, void *func, u_long *val)
{
	struct ipi_rd_args *ira;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	ira = &ipi_rd_args;
	CPU_SETOF(cpu, &ira->ira_mask);
	ira->ira_val = val;
	cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
	return (&ira->ira_mask);
}

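/*
 * TLB demap IPIs are only sent to CPUs on which the pmap is active; if no
 * other CPU qualifies, no IPI is sent and NULL is returned.
 */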
static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&smp_ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&smp_ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (smp_cpus == 1)
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&smp_ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

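/*
 * Spin until every target CPU has cleared itself from the mask, then drop
 * smp_ipi_mtx and unpin; a NULL cookie (no IPI was sent) is a no-op.
 * ipi_wait_unlocked() does the same for senders that did not take the lock.
 */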
static __inline void
ipi_wait(void *cookie)
{
	volatile cpuset_t *mask;

	if ((mask = cookie) != NULL) {
		while (!CPU_EMPTY(mask))
			;
		mtx_unlock_spin(&smp_ipi_mtx);
		sched_unpin();
	}
}

static __inline void
ipi_wait_unlocked(void *cookie)
{
	volatile cpuset_t *mask;

	if ((mask = cookie) != NULL) {
		while (!CPU_EMPTY(mask))
			;
		sched_unpin();
	}
}

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#endif /* !LOCORE */

#else

#ifndef	LOCORE

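/*
 * Stubs for kernels built without SMP support; they return NULL cookies
 * and do nothing, so cache and TLB shootdowns reduce to local operations.
 */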
static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}

static __inline void *
ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
    vm_offset_t end __unused)
{

	return (NULL);
}

static __inline void
ipi_wait(void *cookie __unused)
{

}

static __inline void
ipi_wait_unlocked(void *cookie __unused)
{

}

static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}

#endif /* !LOCORE */

#endif /* SMP */

#endif /* !_MACHINE_SMP_H_ */