1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD$
28 */
29
30#ifndef	_MACHINE_SMP_H_
31#define	_MACHINE_SMP_H_
32
33#ifdef SMP
34
35#define	CPU_TICKSYNC		1
36#define	CPU_STICKSYNC		2
37#define	CPU_INIT		3
38#define	CPU_BOOTSTRAP		4
39
40#ifndef	LOCORE
41
42#include <sys/cpuset.h>
43#include <sys/proc.h>
44#include <sys/sched.h>
45#include <sys/smp.h>
46
47#include <machine/intr_machdep.h>
48#include <machine/pcb.h>
49#include <machine/tte.h>
50
51#define	IDR_BUSY			0x0000000000000001ULL
52#define	IDR_NACK			0x0000000000000002ULL
53#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
54#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
55#define	IDR_CHEETAH_MAX_BN_PAIRS	32
56#define	IDR_JALAPENO_MAX_BN_PAIRS	4
57
58#define	IDC_ITID_SHIFT			14
59#define	IDC_BN_SHIFT			24
60
61#define	IPI_AST		PIL_AST
62#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
63#define	IPI_PREEMPT	PIL_PREEMPT
64#define	IPI_HARDCLOCK	PIL_HARDCLOCK
65#define	IPI_STOP	PIL_STOP
66#define	IPI_STOP_HARD	PIL_STOP
67
68#define	IPI_RETRIES	5000
69
/*
 * Argument area handed to a secondary CPU during startup; csa_state is
 * stepped through the CPU_* handshake values defined above.
 */
struct cpu_start_args {
	u_int	csa_count;	/* handshake counter */
	u_int	csa_mid;	/* module ID of the target CPU */
	u_int	csa_state;	/* CPU_TICKSYNC .. CPU_BOOTSTRAP handshake */
	vm_offset_t csa_pcpu;	/* per-CPU area of the target CPU */
	u_long	csa_tick;	/* TICK value, presumably for CPU_TICKSYNC */
	u_long	csa_stick;	/* STICK value, presumably for CPU_STICKSYNC */
	u_long	csa_ver;	/* version register of the target CPU */
	struct	tte csa_ttes[PCPU_PAGES]; /* TTEs mapping the per-CPU pages */
};
80
/*
 * Argument block for the cache page invalidation IPIs; a single global
 * instance (ipi_cache_args) is serialized by smp_ipi_mtx, see
 * ipi_dcache_page_inval() and ipi_icache_page_inval() below.
 */
struct ipi_cache_args {
	cpuset_t ica_mask;	/* CPUs yet to acknowledge the IPI */
	vm_paddr_t ica_pa;	/* physical address of the page to invalidate */
};
85
/*
 * Argument block for the (s)tick register read IPIs; a single global
 * instance (ipi_rd_args) is serialized by smp_ipi_mtx, see ipi_rd() below.
 */
struct ipi_rd_args {
	cpuset_t ira_mask;	/* CPUs yet to acknowledge the IPI */
	register_t *ira_val;	/* where the target CPU stores the value read */
};
90
/*
 * Argument block for the TLB demap IPIs; a single global instance
 * (ipi_tlb_args) is serialized by smp_ipi_mtx, see the ipi_tlb_*_demap()
 * functions below.
 */
struct ipi_tlb_args {
	cpuset_t ita_mask;	/* CPUs yet to acknowledge the IPI */
	struct	pmap *ita_pmap;	/* pmap whose TLB entries are demapped */
	u_long	ita_start;	/* start VA of the range (or single VA) */
	u_long	ita_end;	/* end VA of the range */
};
/* For the single-page demap, ita_start doubles as the sole VA. */
#define	ita_va	ita_start
98
99struct pcpu;
100
101extern struct pcb stoppcbs[];
102
103void	cpu_mp_bootstrap(struct pcpu *pc);
104void	cpu_mp_shutdown(void);
105
106typedef	void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
107extern	cpu_ipi_selected_t *cpu_ipi_selected;
108typedef	void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
109extern	cpu_ipi_single_t *cpu_ipi_single;
110
111void	mp_init(u_int cpu_impl);
112
113extern	struct ipi_cache_args ipi_cache_args;
114extern	struct ipi_rd_args ipi_rd_args;
115extern	struct ipi_tlb_args ipi_tlb_args;
116
117extern	char *mp_tramp_code;
118extern	u_long mp_tramp_code_len;
119extern	u_long mp_tramp_tlb_slots;
120extern	u_long mp_tramp_func;
121
122extern	void mp_startup(void);
123
124extern	char tl_ipi_cheetah_dcache_page_inval[];
125extern	char tl_ipi_spitfire_dcache_page_inval[];
126extern	char tl_ipi_spitfire_icache_page_inval[];
127
128extern	char tl_ipi_level[];
129
130extern	char tl_ipi_stick_rd[];
131extern	char tl_ipi_tick_rd[];
132
133extern	char tl_ipi_tlb_context_demap[];
134extern	char tl_ipi_tlb_page_demap[];
135extern	char tl_ipi_tlb_range_demap[];
136
137static __inline void
138ipi_all_but_self(u_int ipi)
139{
140	cpuset_t cpus;
141
142	cpus = all_cpus;
143	CPU_CLR(PCPU_GET(cpuid), &cpus);
144	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
145}
146
147static __inline void
148ipi_selected(cpuset_t cpus, u_int ipi)
149{
150
151	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
152}
153
154static __inline void
155ipi_cpu(int cpu, u_int ipi)
156{
157
158	cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
159}
160
161#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
162
163static __inline void *
164ipi_dcache_page_inval(void *func, vm_paddr_t pa)
165{
166	struct ipi_cache_args *ica;
167
168	if (smp_cpus == 1)
169		return (NULL);
170	sched_pin();
171	ica = &ipi_cache_args;
172	mtx_lock_spin(&smp_ipi_mtx);
173	ica->ica_mask = all_cpus;
174	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
175	ica->ica_pa = pa;
176	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
177	return (&ica->ica_mask);
178}
179
180static __inline void *
181ipi_icache_page_inval(void *func, vm_paddr_t pa)
182{
183	struct ipi_cache_args *ica;
184
185	if (smp_cpus == 1)
186		return (NULL);
187	sched_pin();
188	ica = &ipi_cache_args;
189	mtx_lock_spin(&smp_ipi_mtx);
190	ica->ica_mask = all_cpus;
191	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
192	ica->ica_pa = pa;
193	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
194	return (&ica->ica_mask);
195}
196
197static __inline void *
198ipi_rd(u_int cpu, void *func, u_long *val)
199{
200	struct ipi_rd_args *ira;
201
202	if (smp_cpus == 1)
203		return (NULL);
204	sched_pin();
205	ira = &ipi_rd_args;
206	mtx_lock_spin(&smp_ipi_mtx);
207	CPU_SETOF(cpu, &ira->ira_mask);
208	ira->ira_val = val;
209	cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
210	return (&ira->ira_mask);
211}
212
213static __inline void *
214ipi_tlb_context_demap(struct pmap *pm)
215{
216	struct ipi_tlb_args *ita;
217	cpuset_t cpus;
218
219	if (smp_cpus == 1)
220		return (NULL);
221	sched_pin();
222	cpus = pm->pm_active;
223	CPU_AND(&cpus, &all_cpus);
224	CPU_CLR(PCPU_GET(cpuid), &cpus);
225	if (CPU_EMPTY(&cpus)) {
226		sched_unpin();
227		return (NULL);
228	}
229	ita = &ipi_tlb_args;
230	mtx_lock_spin(&smp_ipi_mtx);
231	ita->ita_mask = cpus;
232	ita->ita_pmap = pm;
233	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
234	    (u_long)ita);
235	return (&ita->ita_mask);
236}
237
238static __inline void *
239ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
240{
241	struct ipi_tlb_args *ita;
242	cpuset_t cpus;
243
244	if (smp_cpus == 1)
245		return (NULL);
246	sched_pin();
247	cpus = pm->pm_active;
248	CPU_AND(&cpus, &all_cpus);
249	CPU_CLR(PCPU_GET(cpuid), &cpus);
250	if (CPU_EMPTY(&cpus)) {
251		sched_unpin();
252		return (NULL);
253	}
254	ita = &ipi_tlb_args;
255	mtx_lock_spin(&smp_ipi_mtx);
256	ita->ita_mask = cpus;
257	ita->ita_pmap = pm;
258	ita->ita_va = va;
259	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
260	return (&ita->ita_mask);
261}
262
263static __inline void *
264ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
265{
266	struct ipi_tlb_args *ita;
267	cpuset_t cpus;
268
269	if (smp_cpus == 1)
270		return (NULL);
271	sched_pin();
272	cpus = pm->pm_active;
273	CPU_AND(&cpus, &all_cpus);
274	CPU_CLR(PCPU_GET(cpuid), &cpus);
275	if (CPU_EMPTY(&cpus)) {
276		sched_unpin();
277		return (NULL);
278	}
279	ita = &ipi_tlb_args;
280	mtx_lock_spin(&smp_ipi_mtx);
281	ita->ita_mask = cpus;
282	ita->ita_pmap = pm;
283	ita->ita_start = start;
284	ita->ita_end = end;
285	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
286	    (u_long)ita);
287	return (&ita->ita_mask);
288}
289
290static __inline void
291ipi_wait(void *cookie)
292{
293	volatile cpuset_t *mask;
294
295	if ((mask = cookie) != NULL) {
296		while (!CPU_EMPTY(mask))
297			;
298		mtx_unlock_spin(&smp_ipi_mtx);
299		sched_unpin();
300	}
301}
302
303#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */
304
305#endif /* !LOCORE */
306
307#else
308
309#ifndef	LOCORE
310
/* UP stub: no other CPUs, so there is no D-cache invalidation IPI to send. */
static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
317
/* UP stub: no other CPUs, so there is no I-cache invalidation IPI to send. */
static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}
324
/* UP stub: no other CPUs, so there is no register read IPI to send. */
static __inline void *
ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
{

	return (NULL);
}
331
/* UP stub: no other CPUs, so there is no TLB context demap IPI to send. */
static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

	return (NULL);
}
338
/* UP stub: no other CPUs, so there is no TLB page demap IPI to send. */
static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

	return (NULL);
}
345
346static __inline void *
347ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
348    __unused vm_offset_t end)
349{
350
351	return (NULL);
352}
353
/* UP stub: nothing was sent, so there is nothing to wait for. */
static __inline void
ipi_wait(void *cookie __unused)
{

}
359
/* UP stub for the trap-level Cheetah D-cache invalidation handler. */
static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}
365
/* UP stub for the trap-level Spitfire D-cache invalidation handler. */
static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}
371
/* UP stub for the trap-level Spitfire I-cache invalidation handler. */
static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}
377
378#endif /* !LOCORE */
379
380#endif /* SMP */
381
382#endif /* !_MACHINE_SMP_H_ */
383