/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2001 Jake Burkholder.
 * Copyright (c) 2007 - 2011 Marius Strobl <marius@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_SMP_H_
#define	_MACHINE_SMP_H_

#ifdef SMP

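/*
 * Values taken by csa_state in struct cpu_start_args (below), used as a
 * hand-shake with a starting AP: synchronize the TICK and then the
 * (S)TICK counter, let the AP initialize, and finally have it call into
 * cpu_mp_bootstrap().
 */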
#define	CPU_TICKSYNC		1
#define	CPU_STICKSYNC		2
#define	CPU_INIT		3
#define	CPU_BOOTSTRAP		4

#ifndef	LOCORE

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/tte.h>

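/*
 * Bits of the interrupt dispatch status register.  Cheetah-class CPUs
 * provide one busy/NACK (BN) bit pair per dispatch target, hence the
 * "all busy"/"all NACK" masks and the per-implementation limits on the
 * number of BN pairs.
 */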
#define	IDR_BUSY			0x0000000000000001ULL
#define	IDR_NACK			0x0000000000000002ULL
#define	IDR_CHEETAH_ALL_BUSY		0x5555555555555555ULL
#define	IDR_CHEETAH_ALL_NACK		(~IDR_CHEETAH_ALL_BUSY)
#define	IDR_CHEETAH_MAX_BN_PAIRS	32
#define	IDR_JALAPENO_MAX_BN_PAIRS	4

#define	IDC_ITID_SHIFT			14
#define	IDC_BN_SHIFT			24

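/*
 * The IPI types map directly onto processor interrupt levels; sending
 * one of these IPIs raises the corresponding PIL on the target CPU.
 */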
#define	IPI_AST		PIL_AST
#define	IPI_RENDEZVOUS	PIL_RENDEZVOUS
#define	IPI_PREEMPT	PIL_PREEMPT
#define	IPI_HARDCLOCK	PIL_HARDCLOCK
#define	IPI_STOP	PIL_STOP
#define	IPI_STOP_HARD	PIL_STOP

#define	IPI_RETRIES	5000

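/*
 * Mailbox shared with the AP start-up trampoline; the BSP fills in the
 * TTEs mapping the per-CPU pages and the AP reports its progress via
 * csa_state (see the CPU_* values above).
 */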
struct cpu_start_args {
	u_int	csa_count;
	u_int	csa_mid;
	u_int	csa_state;
	vm_offset_t csa_pcpu;
	u_long	csa_tick;
	u_long	csa_stick;
	u_long	csa_ver;
	struct	tte csa_ttes[PCPU_PAGES];
};

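/*
 * Argument areas handed to the IPI handlers; as they are shared by all
 * senders they are protected by ipi_mtx, which is held from the dispatch
 * in the ipi_*() senders until ipi_wait() observes an empty target mask.
 */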
struct ipi_cache_args {
	cpuset_t ica_mask;
	vm_paddr_t ica_pa;
};

struct ipi_rd_args {
	cpuset_t ira_mask;
	register_t *ira_val;
};

struct ipi_tlb_args {
	cpuset_t ita_mask;
	struct	pmap *ita_pmap;
	u_long	ita_start;
	u_long	ita_end;
};
#define	ita_va	ita_start

struct pcb;
struct pcpu;

extern struct pcb stoppcbs[];

void	cpu_mp_bootstrap(struct pcpu *pc);
void	cpu_mp_shutdown(void);

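/*
 * IPI dispatch goes through function pointers so the MD start-up code
 * can install the variant matching the CPU implementation (Spitfire-,
 * Cheetah- or Jalapeno-style dispatch).
 */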
typedef	void cpu_ipi_selected_t(cpuset_t, u_long, u_long, u_long);
extern	cpu_ipi_selected_t *cpu_ipi_selected;
typedef	void cpu_ipi_single_t(u_int, u_long, u_long, u_long);
extern	cpu_ipi_single_t *cpu_ipi_single;

void	mp_init(void);

extern	struct mtx ipi_mtx;
extern	struct ipi_cache_args ipi_cache_args;
extern	struct ipi_rd_args ipi_rd_args;
extern	struct ipi_tlb_args ipi_tlb_args;

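/*
 * Description of the AP start-up trampoline: the code image and its
 * length, the number of locked TLB slots the trampoline is to enter,
 * and the function it finally jumps to.
 */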
extern	char *mp_tramp_code;
extern	u_long mp_tramp_code_len;
extern	u_long mp_tramp_tlb_slots;
extern	u_long mp_tramp_func;

extern	void mp_startup(void);

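/*
 * Entry points of the low-level (trap-table) IPI handlers; their
 * addresses are passed as the dispatch data of the cross-call.
 */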
extern	char tl_ipi_cheetah_dcache_page_inval[];
extern	char tl_ipi_spitfire_dcache_page_inval[];
extern	char tl_ipi_spitfire_icache_page_inval[];

extern	char tl_ipi_level[];

extern	char tl_ipi_stick_rd[];
extern	char tl_ipi_tick_rd[];

extern	char tl_ipi_tlb_context_demap[];
extern	char tl_ipi_tlb_page_demap[];
extern	char tl_ipi_tlb_range_demap[];

static __inline void
ipi_all_but_self(u_int ipi)
{
	cpuset_t cpus;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return;
	cpus = all_cpus;
	sched_pin();
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	mtx_lock_spin(&ipi_mtx);
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
	mtx_unlock_spin(&ipi_mtx);
	sched_unpin();
}

static __inline void
ipi_selected(cpuset_t cpus, u_int ipi)
{

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0 ||
	    CPU_EMPTY(&cpus)))
		return;
	mtx_lock_spin(&ipi_mtx);
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
	mtx_unlock_spin(&ipi_mtx);
}

static __inline void
ipi_cpu(int cpu, u_int ipi)
{

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return;
	mtx_lock_spin(&ipi_mtx);
	cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
	mtx_unlock_spin(&ipi_mtx);
}

#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)

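/*
 * The helpers below need struct pmap and spin mutexes, so they are only
 * made visible once both <machine/pmap.h> and <sys/mutex.h> have been
 * included.
 */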
static __inline void *
ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
	ica->ica_pa = pa;
	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

static __inline void *
ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
	struct ipi_cache_args *ica;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	ica = &ipi_cache_args;
	mtx_lock_spin(&ipi_mtx);
	ica->ica_mask = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &ica->ica_mask);
	ica->ica_pa = pa;
	cpu_ipi_selected(ica->ica_mask, 0, (u_long)func, (u_long)ica);
	return (&ica->ica_mask);
}

static __inline void *
ipi_rd(u_int cpu, void *func, u_long *val)
{
	struct ipi_rd_args *ira;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	ira = &ipi_rd_args;
	mtx_lock_spin(&ipi_mtx);
	CPU_SETOF(cpu, &ira->ira_mask);
	ira->ira_val = val;
	cpu_ipi_single(cpu, 0, (u_long)func, (u_long)ira);
	return (&ira->ira_mask);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_context_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	ita->ita_va = va;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_page_demap, (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	struct ipi_tlb_args *ita;
	cpuset_t cpus;

	if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
		return (NULL);
	sched_pin();
	cpus = pm->pm_active;
	CPU_AND(&cpus, &all_cpus);
	CPU_CLR(PCPU_GET(cpuid), &cpus);
	if (CPU_EMPTY(&cpus)) {
		sched_unpin();
		return (NULL);
	}
	ita = &ipi_tlb_args;
	mtx_lock_spin(&ipi_mtx);
	ita->ita_mask = cpus;
	ita->ita_pmap = pm;
	ita->ita_start = start;
	ita->ita_end = end;
	cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_tlb_range_demap,
	    (u_long)ita);
	return (&ita->ita_mask);
}

static __inline void
ipi_wait(void *cookie)
{
	volatile cpuset_t *mask;

	if (__predict_false((mask = cookie) != NULL)) {
		while (!CPU_EMPTY(mask))
			;
		mtx_unlock_spin(&ipi_mtx);
		sched_unpin();
	}
}
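
/*
 * Illustrative usage (a sketch of how a sender is expected to pair with
 * ipi_wait()):
 *
 *	void *cookie;
 *
 *	cookie = ipi_tlb_page_demap(pm, va);
 *	(perform the local demap while the targets work)
 *	ipi_wait(cookie);
 *
 * ipi_wait() spins until every target has cleared itself from the mask
 * and then releases ipi_mtx and the sched_pin() taken by the sender; a
 * NULL cookie (no IPIs were sent) makes it a no-op.
 */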

#endif /* _MACHINE_PMAP_H_ && _SYS_MUTEX_H_ */

#endif /* !LOCORE */

#else

#ifndef	LOCORE

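/*
 * UP kernels get no-op stubs so that callers need not be conditional on
 * the SMP option.
 */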
static __inline void *
ipi_dcache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}

static __inline void *
ipi_icache_page_inval(void *func __unused, vm_paddr_t pa __unused)
{

	return (NULL);
}

static __inline void *
ipi_rd(u_int cpu __unused, void *func __unused, u_long *val __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_context_demap(struct pmap *pm __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_page_demap(struct pmap *pm __unused, vm_offset_t va __unused)
{

	return (NULL);
}

static __inline void *
ipi_tlb_range_demap(struct pmap *pm __unused, vm_offset_t start __unused,
    vm_offset_t end __unused)
{

	return (NULL);
}

static __inline void
ipi_wait(void *cookie __unused)
{

}

static __inline void
tl_ipi_cheetah_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_dcache_page_inval(void)
{

}

static __inline void
tl_ipi_spitfire_icache_page_inval(void)
{

}

#endif /* !LOCORE */

#endif /* SMP */

#endif /* !_MACHINE_SMP_H_ */