/*-
 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and machine independent functions
 * used for the kernel SMP support.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);

/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
    sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");

int smp_disabled = 0;	/* has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* how many CPUs are running */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);
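
/*
 * Illustrative note (not from the original source): the two tunables above
 * are read from the loader environment at boot, so they can be set in
 * /boot/loader.conf, e.g.:
 *
 *	kern.smp.disabled=1	# boot with only one CPU
 *	kern.smp.topology=3	# fake dual-core/shared-L2 topology
 *				# (see smp_topo() below)
 */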

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0,
	   "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

#if defined(__amd64__) || defined(__i386__)
	/*
	 * When suspending, ensure there are no IPIs in progress.
	 * IPIs that have been issued, but not yet delivered (e.g.
	 * not pending on a vCPU when running under virtualization)
	 * will be lost, violating FreeBSD's assumption of reliable
	 * IPI delivery.
	 */
	if (type == IPI_SUSPEND)
		mtx_lock_spin(&smp_ipi_mtx);
#endif

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* send the stop IPI to all CPUs in map */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		mtx_unlock_spin(&smp_ipi_mtx);
#endif

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if defined(__amd64__) || defined(__i386__)
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif
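
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * stop every CPU except the caller, do work that must not race with them,
 * then let them continue:
 *
 *	cpuset_t map;
 *
 *	map = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &map);
 *	stop_cpus(map);
 *	(inspect or patch global state here)
 *	restart_cpus(stopped_cpus);
 */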

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of the smp_rv_* pseudo-structure will
	 * be accessed by this target CPU after this point; in particular,
	 * the memory pointed to by smp_rv_func_arg.
	 */
	atomic_add_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func,
	    teardown_func, arg);
}
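
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * run an action on all CPUs and wait for every CPU to complete it.  Both
 * "set_feature_action" and "enable_feature" are hypothetical names; a
 * real handler must follow the reentrancy rules described above
 * smp_rendezvous_action().
 *
 *	static void
 *	set_feature_action(void *arg)
 *	{
 *
 *		enable_feature(*(int *)arg);
 *	}
 *
 *	int val = 1;
 *	smp_rendezvous(NULL, set_feature_action, NULL, &val);
 */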

static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing.  */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2.  */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 among each package, private L2.  */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, 2 dual-core parts on each package share L2.  */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2.  */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
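
/*
 * Illustrative sketch (not from the original source): on an 8-CPU system,
 * smp_topo_2level(CG_SHARE_L3, 2, CG_SHARE_L2, 4, 0) builds one package
 * whose two L2 groups each cover four CPUs:
 *
 *	root    (CG_SHARE_NONE, CPUs 0-7)
 *	  l2g   (CG_SHARE_L3,   CPUs 0-7)
 *	    l1g (CG_SHARE_L2,   CPUs 0-3)
 *	    l1g (CG_SHARE_L2,   CPUs 4-7)
 */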

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	       void (*action_func)(void *),
	       void (*teardown_func)(void *),
	       void *arg)
{

	/* See the comments in smp_rendezvous_cpus(). */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT(!smp_started, ("smp_no_rendevous called and smp is started"));
#endif
}
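
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * passing smp_no_rendevous_barrier instead of NULL as the setup or
 * teardown function makes smp_rendezvous_action() skip the corresponding
 * barrier entirely, e.g.:
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, flush_action,
 *	    smp_no_rendevous_barrier, NULL);
 *
 * where "flush_action" is a hypothetical handler that no CPU needs to
 * wait on before or after running.
 */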

/*
 * Wait for the specified idle threads to switch once.  This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
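
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * replace a hypothetical global hook pointer and wait until no CPU can
 * still be running the old one before freeing it:
 *
 *	old = some_hook;
 *	atomic_store_rel_ptr((volatile uintptr_t *)&some_hook,
 *	    (uintptr_t)new_hook);
 *	quiesce_all_cpus("hookswap", 0);
 *	free(old, M_TEMP);
 *
 * "some_hook" and "new_hook" are hypothetical names.
 */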

/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
{
	int error, active;

	active = smp_started;
	error = SYSCTL_OUT(req, &active, sizeof(active));
	return (error);
}