smp.h revision 286055
1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: stable/10/sys/sparc64/include/smp.h 286055 2015-07-30 02:06:29Z marius $
28 */
29
30#ifndef	_MACHINE_SMP_H_
31#define	_MACHINE_SMP_H_
32
33#ifdef SMP
34
/*
 * AP startup handshake states.  Judging by the csa_state/csa_tick/
 * csa_stick fields of struct cpu_start_args below, a starting CPU is
 * stepped through tick sync, stick sync, init and bootstrap.
 */
#define	CPU_TICKSYNC		1
#define	CPU_STICKSYNC		2
#define	CPU_INIT		3
#define	CPU_BOOTSTRAP		4
39
40#ifndef	LOCORE
41
42#include <sys/param.h>
43#include <sys/cpuset.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/proc.h>
47#include <sys/sched.h>
48#include <sys/smp.h>
49
50#include <machine/intr_machdep.h>
51#include <machine/tte.h>
52
/*
 * Interrupt dispatch register encodings used when sending IPIs.
 * IDR_BUSY/IDR_NACK are the dispatch busy and NACK status bits;
 * the CHEETAH/JALAPENO variants cover the multi-target dispatch of
 * the UltraSPARC-III family (NOTE(review): exact bit layout is from
 * the CPU manuals and cannot be verified from this header alone).
 */
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
/* Every BUSY bit of the busy/NACK pairs set ... */
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
/* ... and the complementary positions: every NACK bit set. */
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

/* Shift counts for interrupt target ID and busy/NACK pair fields. */
#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24
62
/*
 * IPI numbers are simply the PIL (processor interrupt level) the target
 * CPU is interrupted at; IPI_STOP_HARD shares PIL_STOP with IPI_STOP.
 */
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_HARDCLOCK	PIL_HARDCLOCK
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

/*
 * Retry budget for IPI dispatch; presumably consumed by the MD send
 * code (cpu_ipi_selected/cpu_ipi_single implementations), not visible
 * in this header.
 */
#define	IPI_RETRIES	5000
71
/*
 * Argument block handed to an AP while it is being started; csa_state
 * drives the CPU_* handshake above.  Field roles beyond what this
 * header shows are inferred from names — confirm in the MD startup code.
 */
struct cpu_start_args {
	u_int	csa_count;	/* handshake counter (TODO: confirm use) */
	u_int	csa_mid;	/* target CPU module ID (presumably) */
	u_int	csa_state;	/* current CPU_* handshake state */
	vm_offset_t csa_pcpu;	/* VA of the target's per-CPU area */
	u_long	csa_tick;	/* tick value for CPU_TICKSYNC */
	u_long	csa_stick;	/* stick value for CPU_STICKSYNC */
	u_long	csa_ver;	/* CPU version register (presumably) */
	struct	tte csa_ttes[PCPU_PAGES]; /* TTEs mapping the pcpu pages */
};
82
/* Shared argument block for the cache page invalidation IPIs. */
struct ipi_cache_args {
	cpuset_t ica_mask;	/* pending targets; ipi_wait() spins on it */
	vm_paddr_t ica_pa;	/* physical address of the affected page */
};
87
/* Shared argument block for the (s)tick register read IPIs. */
struct ipi_rd_args {
	cpuset_t ira_mask;	/* single pending target; see ipi_rd() */
	register_t *ira_val;	/* where the target deposits the value read */
};
92
/* Shared argument block for the TLB demap IPIs. */
struct ipi_tlb_args {
	cpuset_t ita_mask;	/* pending targets; ipi_wait() spins on it */
	struct	pmap *ita_pmap;	/* pmap whose mappings are demapped */
	u_long	ita_start;	/* start VA of the range to demap */
	u_long	ita_end;	/* end VA of the range to demap */
};
/* The single-page demap reuses ita_start as the page VA. */
#define	ita_va	ita_start
100
struct pcb;
struct pcpu;

/* PCBs of stopped CPUs (presumably indexed by CPU id; see MI smp code). */
extern struct pcb stoppcbs[];

/* MD entry points used during MP startup and shutdown. */
void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

/*
 * Low-level IPI dispatch is CPU-model specific; the actual senders are
 * reached through these hooks (presumably set up during MP init).
 */
typedef	void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
extern	cpu_ipi_selected_t *cpu_ipi_selected;
typedef	void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
extern	cpu_ipi_single_t *cpu_ipi_single;

void	mp_init(void);

/* Lock and shared argument blocks used by the ipi_*() wrappers below. */
extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_rd_args ipi_rd_args;
extern	struct ipi_tlb_args ipi_tlb_args;

/* AP bootstrap trampoline blob and its parameters. */
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tlb_slots;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

/*
 * Trap-level IPI handlers; declared as char arrays so their addresses
 * can be taken and passed to the dispatch hooks as handler entry points.
 */
extern	char tl_ipi_cheetah_dcache_page_inval[];
extern	char tl_ipi_spitfire_dcache_page_inval[];
extern	char tl_ipi_spitfire_icache_page_inval[];

extern	char tl_ipi_level[];

extern	char tl_ipi_stick_rd[];
extern	char tl_ipi_tick_rd[];

extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];
140
141static __inline void
142ipi_all_but_self(u_int ipi)
143{
144	cpuset_t cpus;
145
146	if (__predict_false(smp_started == 0))
147		return;
148	cpus = all_cpus;
149	sched_pin();
150	CPU_CLR(PCPU_GET(cpuid), &cpus);
151	mtx_lock_spin(&ipi_mtx);
152	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
153	mtx_unlock_spin(&ipi_mtx);
154	sched_unpin();
155}
156
/*
 * Send the IPI "ipi" to every CPU in "cpus" through the model-specific
 * cpu_ipi_selected hook, with tl_ipi_level as the trap-level handler.
 */
static __inline void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	/* Nothing to do before the APs are up or when the set is empty. */
	if (__predict_false(smp_started == 0 || CPU_EMPTY(&cpus)))
		return;
	mtx_lock_spin(&ipi_mtx);
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
	mtx_unlock_spin(&ipi_mtx);
}
167
/*
 * Send the IPI "ipi" to the single CPU "cpu" through the model-specific
 * cpu_ipi_single hook, with tl_ipi_level as the trap-level handler.
 */
static __inline void
ipi_cpu(int cpu, u_int ipi)
{

	/* Before the APs are running there is nobody to interrupt. */
	if (__predict_false(smp_started == 0))
		return;
	mtx_lock_spin(&ipi_mtx);
	cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
	mtx_unlock_spin(&ipi_mtx);
}
178
179#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
180
181static __inline void *
182ipi_dcache_page_inval(void *func, vm_paddr_t pa)
183{
184	struct ipi_cache_args *ica;
185
186	if (__predict_false(smp_started == 0))
187		return (NULL);
188	sched_pin();
189	ica = &ipi_cache_args;
190	mtx_lock_spin(&ipi_mtx);
191	ica->ica_mask = all_cpus;
192	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
193	ica->ica_pa = pa;
194	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
195	return (&ica->ica_mask);
196}
197
198static __inline void *
199ipi_icache_page_inval(void *func, vm_paddr_t pa)
200{
201	struct ipi_cache_args *ica;
202
203	if (__predict_false(smp_started == 0))
204		return (NULL);
205	sched_pin();
206	ica = &ipi_cache_args;
207	mtx_lock_spin(&ipi_mtx);
208	ica->ica_mask = all_cpus;
209	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
210	ica->ica_pa = pa;
211	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
212	return (&ica->ica_mask);
213}
214
215static __inline void *
216ipi_rd(u_int cpu, void *func, u_long *val)
217{
218	struct ipi_rd_args *ira;
219
220	if (__predict_false(smp_started == 0))
221		return (NULL);
222	sched_pin();
223	ira = &ipi_rd_args;
224	mtx_lock_spin(&ipi_mtx);
225	CPU_SETOF(cpu, &ira->ira_mask);
226	ira->ira_val = val;
227	cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
228	return (&ira->ira_mask);
229}
230
231static __inline void *
232ipi_tlb_context_demap(struct pmap *pm)
233{
234	struct ipi_tlb_args *ita;
235	cpuset_t cpus;
236
237	if (__predict_false(smp_started == 0))
238		return (NULL);
239	sched_pin();
240	cpus = pm->pm_active;
241	CPU_AND(&cpus, &all_cpus);
242	CPU_CLR(PCPU_GET(cpuid), &cpus);
243	if (CPU_EMPTY(&cpus)) {
244		sched_unpin();
245		return (NULL);
246	}
247	ita = &ipi_tlb_args;
248	mtx_lock_spin(&ipi_mtx);
249	ita->ita_mask = cpus;
250	ita->ita_pmap = pm;
251	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
252	    (u_long)ita);
253	return (&ita->ita_mask);
254}
255
256static __inline void *
257ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
258{
259	struct ipi_tlb_args *ita;
260	cpuset_t cpus;
261
262	if (__predict_false(smp_started == 0))
263		return (NULL);
264	sched_pin();
265	cpus = pm->pm_active;
266	CPU_AND(&cpus, &all_cpus);
267	CPU_CLR(PCPU_GET(cpuid), &cpus);
268	if (CPU_EMPTY(&cpus)) {
269		sched_unpin();
270		return (NULL);
271	}
272	ita = &ipi_tlb_args;
273	mtx_lock_spin(&ipi_mtx);
274	ita->ita_mask = cpus;
275	ita->ita_pmap = pm;
276	ita->ita_va = va;
277	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
278	return (&ita->ita_mask);
279}
280
281static __inline void *
282ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
283{
284	struct ipi_tlb_args *ita;
285	cpuset_t cpus;
286
287	if (__predict_false(smp_started == 0))
288		return (NULL);
289	sched_pin();
290	cpus = pm->pm_active;
291	CPU_AND(&cpus, &all_cpus);
292	CPU_CLR(PCPU_GET(cpuid), &cpus);
293	if (CPU_EMPTY(&cpus)) {
294		sched_unpin();
295		return (NULL);
296	}
297	ita = &ipi_tlb_args;
298	mtx_lock_spin(&ipi_mtx);
299	ita->ita_mask = cpus;
300	ita->ita_pmap = pm;
301	ita->ita_start = start;
302	ita->ita_end = end;
303	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
304	    (u_long)ita);
305	return (&ita->ita_mask);
306}
307
308static __inline void
309ipi_wait(void *cookie)
310{
311	volatile cpuset_t *mask;
312
313	if (__predict_false((mask = cookie) != NULL)) {
314		while (!CPU_EMPTY(mask))
315			;
316		mtx_unlock_spin(&ipi_mtx);
317		sched_unpin();
318	}
319}
320
321#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
322
323#endif /* !LOCORE */
324
325#else
326
327#ifndef	LOCORE
328
/* UP stub: no other CPUs, so no remote D$ to invalidate; NULL cookie. */
static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
335
/* UP stub: no other CPUs, so no remote I$ to invalidate; NULL cookie. */
static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
342
/* UP stub: no other CPU to read from; NULL cookie, *val untouched. */
static __inline void *
ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
{

	return (NULL);
}
349
/* UP stub: no remote TLBs to demap; NULL cookie. */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

	return (NULL);
}
356
/* UP stub: no remote TLBs to demap; NULL cookie. */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

	return (NULL);
}
363
364static __inline void *
365ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
366    __unused vm_offset_t end)
367{
368
369	return (NULL);
370}
371
/* UP stub: the senders never hand out a cookie, so nothing to wait for. */
static __inline void
ipi_wait(void *cookie __unused)
{

}
377
/*
 * UP build: stands in for the SMP trap-level handler of the same name;
 * deliberately a no-op.
 */
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{
}
383
/*
 * UP build: stands in for the SMP trap-level handler of the same name;
 * deliberately a no-op.
 */
static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{
}
389
/*
 * UP build: stands in for the SMP trap-level handler of the same name;
 * deliberately a no-op.
 */
static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{
}
395
396#endif /* !LOCORE */
397
398#endif /* SMP */
399
400#endif /* !_MACHINE_SMP_H_ */
401