subr_smp.c: r244444 (deleted lines) vs. r255726 (added lines)
1/*-
2 * Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the author nor the names of any co-contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * This module holds the global variables and machine independent functions
32 * used for the kernel SMP support.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/subr_smp.c 244444 2012-12-19 20:08:06Z jeff $");
36__FBSDID("$FreeBSD: head/sys/kern/subr_smp.c 255726 2013-09-20 05:06:03Z gibbs $");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/kernel.h>
41#include <sys/ktr.h>
42#include <sys/proc.h>
43#include <sys/bus.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/pcpu.h>
47#include <sys/sched.h>
48#include <sys/smp.h>
49#include <sys/sysctl.h>
50
51#include <machine/cpu.h>
52#include <machine/smp.h>
53
54#include "opt_sched.h"
55
56#ifdef SMP
57volatile cpuset_t stopped_cpus;
58volatile cpuset_t started_cpus;
59volatile cpuset_t suspended_cpus;
60cpuset_t hlt_cpus_mask;
61cpuset_t logical_cpus_mask;
62
63void (*cpustop_restartfunc)(void);
64#endif
65/* This is used in modules that need to work in both SMP and UP. */
66cpuset_t all_cpus;
67
68int mp_ncpus;
69/* export this for libkvm consumers. */
70int mp_maxcpus = MAXCPU;
71
72volatile int smp_started;
73u_int mp_maxid;
74
75static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
76 "Kernel SMP");
77
78SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
79 "Max CPU ID.");
80
81SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
82 0, "Max number of CPUs that the system was compiled for.");
83
84int smp_active = 0; /* are the APs allowed to run? */
85SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
86 "Number of Auxiliary Processors (APs) that were successfully started");
87
88int smp_disabled = 0; /* has smp been disabled? */
89SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
90 &smp_disabled, 0, "SMP has been disabled from the loader");
91TUNABLE_INT("kern.smp.disabled", &smp_disabled);
92
93int smp_cpus = 1; /* how many cpu's running */
94SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
95 "Number of CPUs online");
96
97int smp_topology = 0; /* Which topology we're using. */
98SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
99 "Topology override setting; 0 is default provided by hardware.");
100TUNABLE_INT("kern.smp.topology", &smp_topology);
101
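/*
 * Illustrative note (editor addition, not part of either revision): the
 * TUNABLE_INT() hooks above mean these knobs can be set from the loader,
 * e.g. in /boot/loader.conf; the values shown mirror the descriptions in
 * the declarations above.
 *
 *   kern.smp.disabled=1     # boot with SMP disabled (single CPU)
 *   kern.smp.topology=2     # fake topology 2: no topology, all CPUs equal
 */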
102#ifdef SMP
103/* Enable forwarding of a signal to a process running on a different CPU */
104static int forward_signal_enabled = 1;
105SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
106 &forward_signal_enabled, 0,
107 "Forwarding of a signal to a process on a different CPU");
108
109/* Variables needed for SMP rendezvous. */
110static volatile int smp_rv_ncpus;
111static void (*volatile smp_rv_setup_func)(void *arg);
112static void (*volatile smp_rv_action_func)(void *arg);
113static void (*volatile smp_rv_teardown_func)(void *arg);
114static void *volatile smp_rv_func_arg;
115static volatile int smp_rv_waiters[4];
116
117/*
118 * Shared mutex to restrict busywaits between smp_rendezvous() and
119 * smp(_targeted)_tlb_shootdown(). A deadlock occurs if both of these
120 * functions trigger at once and cause multiple CPUs to busywait with
121 * interrupts disabled.
122 */
123struct mtx smp_ipi_mtx;
124
125/*
126 * Let the MD SMP code initialize mp_maxid very early if it can.
127 */
128static void
129mp_setmaxid(void *dummy)
130{
131 cpu_mp_setmaxid();
132}
133SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
134
135/*
136 * Call the MD SMP initialization code.
137 */
138static void
139mp_start(void *dummy)
140{
141
142 mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
143
144 /* Probe for MP hardware. */
145 if (smp_disabled != 0 || cpu_mp_probe() == 0) {
146 mp_ncpus = 1;
147 CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
148 return;
149 }
150
151 cpu_mp_start();
152 printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
153 mp_ncpus);
154 cpu_mp_announce();
155}
156SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
157
158void
159forward_signal(struct thread *td)
160{
161 int id;
162
163 /*
164 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
165 * this thread, so all we need to do is poke it if it is currently
166 * executing so that it executes ast().
167 */
168 THREAD_LOCK_ASSERT(td, MA_OWNED);
169 KASSERT(TD_IS_RUNNING(td),
170 ("forward_signal: thread is not TDS_RUNNING"));
171
172 CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
173
174 if (!smp_started || cold || panicstr)
175 return;
176 if (!forward_signal_enabled)
177 return;
178
179 /* No need to IPI ourself. */
180 if (td == curthread)
181 return;
182
183 id = td->td_oncpu;
184 if (id == NOCPU)
185 return;
186 ipi_cpu(id, IPI_AST);
187}
188
189/*
190 * When called the executing CPU will send an IPI to all other CPUs
191 * requesting that they halt execution.
192 *
193 * Usually (but not necessarily) called with 'other_cpus' as its arg.
194 *
195 * - Signals all CPUs in map to stop.
196 * - Waits for each to stop.
197 *
198 * Returns:
199 * -1: error
200 * 0: NA
201 * 1: ok
202 *
203 */
204static int
205generic_stop_cpus(cpuset_t map, u_int type)
206{
207#ifdef KTR
208 char cpusetbuf[CPUSETBUFSIZ];
209#endif
210 static volatile u_int stopping_cpu = NOCPU;
211 int i;
212 volatile cpuset_t *cpus;
213
214 KASSERT(
215#if defined(__amd64__) || defined(__i386__)
216 type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
217#else
218 type == IPI_STOP || type == IPI_STOP_HARD,
219#endif
220 ("%s: invalid stop type", __func__));
221
222 if (!smp_started)
223 return (0);
224
225 CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
226 cpusetobj_strprint(cpusetbuf, &map), type);
227
228#ifdef XENHVM
229 /*
230 * When migrating a PVHVM domain we need to make sure there are
231 * no IPIs in progress. IPIs that have been issued, but not
232 * yet delivered (not pending on a vCPU) will be lost in the
233 * IPI rebinding process, violating FreeBSD's assumption of
234 * reliable IPI delivery.
235 */
236 if (type == IPI_SUSPEND)
237 mtx_lock_spin(&smp_ipi_mtx);
238#endif
239
228 if (stopping_cpu != PCPU_GET(cpuid))
229 while (atomic_cmpset_int(&stopping_cpu, NOCPU,
230 PCPU_GET(cpuid)) == 0)
231 while (stopping_cpu != NOCPU)
232 cpu_spinwait(); /* spin */
233
234 /* send the stop IPI to all CPUs in map */
235 ipi_selected(map, type);
236
237#if defined(__amd64__) || defined(__i386__)
238 if (type == IPI_SUSPEND)
239 cpus = &suspended_cpus;
240 else
241#endif
242 cpus = &stopped_cpus;
243
244 i = 0;
245 while (!CPU_SUBSET(cpus, &map)) {
246 /* spin */
247 cpu_spinwait();
248 i++;
249 if (i == 100000000) {
250 printf("timeout stopping cpus\n");
251 break;
252 }
253 }
254
267#ifdef XENHVM
268 if (type == IPI_SUSPEND)
269 mtx_unlock_spin(&smp_ipi_mtx);
270#endif
271
255 stopping_cpu = NOCPU;
256 return (1);
257}
258
259int
260stop_cpus(cpuset_t map)
261{
262
263 return (generic_stop_cpus(map, IPI_STOP));
264}
265
266int
267stop_cpus_hard(cpuset_t map)
268{
269
270 return (generic_stop_cpus(map, IPI_STOP_HARD));
271}
272
273#if defined(__amd64__) || defined(__i386__)
274int
275suspend_cpus(cpuset_t map)
276{
277
278 return (generic_stop_cpus(map, IPI_SUSPEND));
279}
280#endif
281
282/*
283 * Called by a CPU to restart stopped CPUs.
284 *
285 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
286 *
287 * - Signals all CPUs in map to restart.
288 * - Waits for each to restart.
289 *
290 * Returns:
291 * -1: error
292 * 0: NA
293 * 1: ok
294 */
295int
296restart_cpus(cpuset_t map)
312static int
313generic_restart_cpus(cpuset_t map, u_int type)
297{
298#ifdef KTR
299 char cpusetbuf[CPUSETBUFSIZ];
300#endif
318 volatile cpuset_t *cpus;
301
320 KASSERT(
321#if defined(__amd64__) || defined(__i386__)
322 type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
323#else
324 type == IPI_STOP || type == IPI_STOP_HARD,
325#endif
326 ("%s: invalid stop type", __func__));
327
302 if (!smp_started)
303 return 0;
304
305 CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
306
333#if defined(__amd64__) || defined(__i386__)
334 if (type == IPI_SUSPEND)
335 cpus = &suspended_cpus;
336 else
337#endif
338 cpus = &stopped_cpus;
339
307 /* signal other cpus to restart */
308 CPU_COPY_STORE_REL(&map, &started_cpus);
309
310 /* wait for each to clear its bit */
311 while (CPU_OVERLAP(&stopped_cpus, &map))
344 while (CPU_OVERLAP(cpus, &map))
312 cpu_spinwait();
313
314 return 1;
315}
316
350int
351restart_cpus(cpuset_t map)
352{
353
354 return (generic_restart_cpus(map, IPI_STOP));
355}
356
357#if defined(__amd64__) || defined(__i386__)
358int
359resume_cpus(cpuset_t map)
360{
361
362 return (generic_restart_cpus(map, IPI_SUSPEND));
363}
364#endif
365
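/*
 * Illustrative sketch (editor addition, not part of either revision):
 * a minimal stop/restart pairing built only from interfaces defined in
 * this file.  The function name is hypothetical; real callers (debugger,
 * suspend paths) follow the same shape.
 */
static void
example_stop_and_restart_others(void)
{
	cpuset_t map;

	/* Every CPU except the one executing this code. */
	map = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &map);

	if (stop_cpus(map) != 0) {
		/* Other CPUs are spinning in the stop handler. */
		/* ... inspect or modify global state here ... */
		restart_cpus(map);
	}
}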
317/*
318 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
319 * (if specified), rendezvous, execute the action function (if specified),
320 * rendezvous again, execute the teardown function (if specified), and then
321 * resume.
322 *
323 * Note that the supplied external functions _must_ be reentrant and aware
324 * that they are running in parallel and in an unknown lock context.
325 */
326void
327smp_rendezvous_action(void)
328{
329 struct thread *td;
330 void *local_func_arg;
331 void (*local_setup_func)(void*);
332 void (*local_action_func)(void*);
333 void (*local_teardown_func)(void*);
334#ifdef INVARIANTS
335 int owepreempt;
336#endif
337
338 /* Ensure we have up-to-date values. */
339 atomic_add_acq_int(&smp_rv_waiters[0], 1);
340 while (smp_rv_waiters[0] < smp_rv_ncpus)
341 cpu_spinwait();
342
343 /* Fetch rendezvous parameters after acquire barrier. */
344 local_func_arg = smp_rv_func_arg;
345 local_setup_func = smp_rv_setup_func;
346 local_action_func = smp_rv_action_func;
347 local_teardown_func = smp_rv_teardown_func;
348
349 /*
350 * Use a nested critical section to prevent any preemptions
351 * from occurring during a rendezvous action routine.
352 * Specifically, if a rendezvous handler is invoked via an IPI
353 * and the interrupted thread was in the critical_exit()
354 * function after setting td_critnest to 0 but before
355 * performing a deferred preemption, this routine can be
356 * invoked with td_critnest set to 0 and td_owepreempt true.
357 * In that case, a critical_exit() during the rendezvous
358 * action would trigger a preemption which is not permitted in
359 * a rendezvous action. To fix this, wrap all of the
360 * rendezvous action handlers in a critical section. We
361 * cannot use a regular critical section however as having
362 * critical_exit() preempt from this routine would also be
363 * problematic (the preemption must not occur before the IPI
364 * has been acknowledged via an EOI). Instead, we
365 * intentionally ignore td_owepreempt when leaving the
366 * critical section. This should be harmless because we do
367 * not permit rendezvous action routines to schedule threads,
368 * and thus td_owepreempt should never transition from 0 to 1
369 * during this routine.
370 */
371 td = curthread;
372 td->td_critnest++;
373#ifdef INVARIANTS
374 owepreempt = td->td_owepreempt;
375#endif
376
377 /*
378 * If requested, run a setup function before the main action
379 * function. Ensure all CPUs have completed the setup
380 * function before moving on to the action function.
381 */
382 if (local_setup_func != smp_no_rendevous_barrier) {
383 if (smp_rv_setup_func != NULL)
384 smp_rv_setup_func(smp_rv_func_arg);
385 atomic_add_int(&smp_rv_waiters[1], 1);
386 while (smp_rv_waiters[1] < smp_rv_ncpus)
387 cpu_spinwait();
388 }
389
390 if (local_action_func != NULL)
391 local_action_func(local_func_arg);
392
393 if (local_teardown_func != smp_no_rendevous_barrier) {
394 /*
395 * Signal that the main action has been completed. If a
396 * full exit rendezvous is requested, then all CPUs will
397 * wait here until all CPUs have finished the main action.
398 */
399 atomic_add_int(&smp_rv_waiters[2], 1);
400 while (smp_rv_waiters[2] < smp_rv_ncpus)
401 cpu_spinwait();
402
403 if (local_teardown_func != NULL)
404 local_teardown_func(local_func_arg);
405 }
406
407 /*
408 * Signal that the rendezvous is fully completed by this CPU.
409 * This means that no member of smp_rv_* pseudo-structure will be
410 * accessed by this target CPU after this point; in particular,
411 * memory pointed by smp_rv_func_arg.
412 */
413 atomic_add_int(&smp_rv_waiters[3], 1);
414
415 td->td_critnest--;
416 KASSERT(owepreempt == td->td_owepreempt,
417 ("rendezvous action changed td_owepreempt"));
418}
419
420void
421smp_rendezvous_cpus(cpuset_t map,
422 void (* setup_func)(void *),
423 void (* action_func)(void *),
424 void (* teardown_func)(void *),
425 void *arg)
426{
427 int curcpumap, i, ncpus = 0;
428
429 /* See the comments in the !SMP case. */
430 if (!smp_started) {
431 spinlock_enter();
432 if (setup_func != NULL)
433 setup_func(arg);
434 if (action_func != NULL)
435 action_func(arg);
436 if (teardown_func != NULL)
437 teardown_func(arg);
438 spinlock_exit();
439 return;
440 }
441
442 CPU_FOREACH(i) {
443 if (CPU_ISSET(i, &map))
444 ncpus++;
445 }
446 if (ncpus == 0)
447 panic("ncpus is 0 with non-zero map");
448
449 mtx_lock_spin(&smp_ipi_mtx);
450
451 /* Pass rendezvous parameters via global variables. */
452 smp_rv_ncpus = ncpus;
453 smp_rv_setup_func = setup_func;
454 smp_rv_action_func = action_func;
455 smp_rv_teardown_func = teardown_func;
456 smp_rv_func_arg = arg;
457 smp_rv_waiters[1] = 0;
458 smp_rv_waiters[2] = 0;
459 smp_rv_waiters[3] = 0;
460 atomic_store_rel_int(&smp_rv_waiters[0], 0);
461
462 /*
463 * Signal other processors, which will enter the IPI with
464 * interrupts off.
465 */
466 curcpumap = CPU_ISSET(curcpu, &map);
467 CPU_CLR(curcpu, &map);
468 ipi_selected(map, IPI_RENDEZVOUS);
469
470 /* Check if the current CPU is in the map */
471 if (curcpumap != 0)
472 smp_rendezvous_action();
473
474 /*
475 * Ensure that the master CPU waits for all the other
476 * CPUs to finish the rendezvous, so that smp_rv_*
477 * pseudo-structure and the arg are guaranteed to not
478 * be in use.
479 */
480 while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
481 cpu_spinwait();
482
483 mtx_unlock_spin(&smp_ipi_mtx);
484}
485
486void
487smp_rendezvous(void (* setup_func)(void *),
488 void (* action_func)(void *),
489 void (* teardown_func)(void *),
490 void *arg)
491{
492 smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
493}
494
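/*
 * Illustrative sketch (editor addition, not part of either revision):
 * a minimal smp_rendezvous() caller.  Every CPU runs example_action()
 * from the rendezvous IPI with interrupts disabled; passing
 * smp_no_rendevous_barrier skips the optional setup/teardown
 * synchronization points, as handled in smp_rendezvous_action() above.
 * The example_* names are hypothetical.
 */
static void
example_action(void *arg)
{

	atomic_add_int((volatile u_int *)arg, 1);
}

static void
example_count_rendezvous_cpus(void)
{
	u_int count = 0;

	smp_rendezvous(smp_no_rendevous_barrier, example_action,
	    smp_no_rendevous_barrier, &count);
	printf("example: action ran on %u CPUs\n", count);
}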
495static struct cpu_group group[MAXCPU];
496
497struct cpu_group *
498smp_topo(void)
499{
500 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
501 struct cpu_group *top;
502
503 /*
504 * Check for a fake topology request for debugging purposes.
505 */
506 switch (smp_topology) {
507 case 1:
508 /* Dual core with no sharing. */
509 top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
510 break;
511 case 2:
512 /* No topology, all cpus are equal. */
513 top = smp_topo_none();
514 break;
515 case 3:
516 /* Dual core with shared L2. */
517 top = smp_topo_1level(CG_SHARE_L2, 2, 0);
518 break;
519 case 4:
520 /* quad core, shared l3 among each package, private l2. */
521 top = smp_topo_1level(CG_SHARE_L3, 4, 0);
522 break;
523 case 5:
524 /* quad core, 2 dualcore parts on each package share l2. */
525 top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
526 break;
527 case 6:
528 /* Single-core 2xHTT */
529 top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
530 break;
531 case 7:
532 /* quad core with a shared l3, 8 threads sharing L2. */
533 top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
534 CG_FLAG_SMT);
535 break;
536 default:
537 /* Default, ask the system what it wants. */
538 top = cpu_topo();
539 break;
540 }
541 /*
542 * Verify the returned topology.
543 */
544 if (top->cg_count != mp_ncpus)
545 panic("Built bad topology at %p. CPU count %d != %d",
546 top, top->cg_count, mp_ncpus);
547 if (CPU_CMP(&top->cg_mask, &all_cpus))
548 panic("Built bad topology at %p. CPU mask (%s) != (%s)",
549 top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
550 cpusetobj_strprint(cpusetbuf2, &all_cpus));
551 return (top);
552}
553
554struct cpu_group *
555smp_topo_none(void)
556{
557 struct cpu_group *top;
558
559 top = &group[0];
560 top->cg_parent = NULL;
561 top->cg_child = NULL;
562 top->cg_mask = all_cpus;
563 top->cg_count = mp_ncpus;
564 top->cg_children = 0;
565 top->cg_level = CG_SHARE_NONE;
566 top->cg_flags = 0;
567
568 return (top);
569}
570
571static int
572smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
573 int count, int flags, int start)
574{
575 char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
576 cpuset_t mask;
577 int i;
578
579 CPU_ZERO(&mask);
580 for (i = 0; i < count; i++, start++)
581 CPU_SET(start, &mask);
582 child->cg_parent = parent;
583 child->cg_child = NULL;
584 child->cg_children = 0;
585 child->cg_level = share;
586 child->cg_count = count;
587 child->cg_flags = flags;
588 child->cg_mask = mask;
589 parent->cg_children++;
590 for (; parent != NULL; parent = parent->cg_parent) {
591 if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
592 panic("Duplicate children in %p. mask (%s) child (%s)",
593 parent,
594 cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
595 cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
596 CPU_OR(&parent->cg_mask, &child->cg_mask);
597 parent->cg_count += child->cg_count;
598 }
599
600 return (start);
601}
602
603struct cpu_group *
604smp_topo_1level(int share, int count, int flags)
605{
606 struct cpu_group *child;
607 struct cpu_group *top;
608 int packages;
609 int cpu;
610 int i;
611
612 cpu = 0;
613 top = &group[0];
614 packages = mp_ncpus / count;
615 top->cg_child = child = &group[1];
616 top->cg_level = CG_SHARE_NONE;
617 for (i = 0; i < packages; i++, child++)
618 cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
619 return (top);
620}
621
622struct cpu_group *
623smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
624 int l1flags)
625{
626 struct cpu_group *top;
627 struct cpu_group *l1g;
628 struct cpu_group *l2g;
629 int cpu;
630 int i;
631 int j;
632
633 cpu = 0;
634 top = &group[0];
635 l2g = &group[1];
636 top->cg_child = l2g;
637 top->cg_level = CG_SHARE_NONE;
638 top->cg_children = mp_ncpus / (l2count * l1count);
639 l1g = l2g + top->cg_children;
640 for (i = 0; i < top->cg_children; i++, l2g++) {
641 l2g->cg_parent = top;
642 l2g->cg_child = l1g;
643 l2g->cg_level = l2share;
644 for (j = 0; j < l2count; j++, l1g++)
645 cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
646 l1flags, cpu);
647 }
648 return (top);
649}
650
651
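/*
 * Worked example (editor addition): for fake topology case 5 above,
 * smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0) with a
 * hypothetical mp_ncpus of 8 yields top->cg_children = 8 / (2 * 2) = 2
 * package groups under the root, each holding two CG_SHARE_L2 leaves of
 * two CPUs, so 1 + 2 + 4 = 7 entries of the static group[] array are used.
 */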
652struct cpu_group *
653smp_topo_find(struct cpu_group *top, int cpu)
654{
655 struct cpu_group *cg;
656 cpuset_t mask;
657 int children;
658 int i;
659
660 CPU_SETOF(cpu, &mask);
661 cg = top;
662 for (;;) {
663 if (!CPU_OVERLAP(&cg->cg_mask, &mask))
664 return (NULL);
665 if (cg->cg_children == 0)
666 return (cg);
667 children = cg->cg_children;
668 for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
669 if (CPU_OVERLAP(&cg->cg_mask, &mask))
670 break;
671 }
672 return (NULL);
673}
674#else /* !SMP */
675
676void
677smp_rendezvous_cpus(cpuset_t map,
678 void (*setup_func)(void *),
679 void (*action_func)(void *),
680 void (*teardown_func)(void *),
681 void *arg)
682{
683 /*
684 * In the !SMP case we just need to ensure the same initial conditions
685 * as the SMP case.
686 */
687 spinlock_enter();
688 if (setup_func != NULL)
689 setup_func(arg);
690 if (action_func != NULL)
691 action_func(arg);
692 if (teardown_func != NULL)
693 teardown_func(arg);
694 spinlock_exit();
695}
696
697void
698smp_rendezvous(void (*setup_func)(void *),
699 void (*action_func)(void *),
700 void (*teardown_func)(void *),
701 void *arg)
702{
703
704 /* See the comments in the smp_rendezvous_cpus() case. */
705 spinlock_enter();
706 if (setup_func != NULL)
707 setup_func(arg);
708 if (action_func != NULL)
709 action_func(arg);
710 if (teardown_func != NULL)
711 teardown_func(arg);
712 spinlock_exit();
713}
714
715/*
716 * Provide dummy SMP support for UP kernels. Modules that need to use SMP
717 * APIs will still work using this dummy support.
718 */
719static void
720mp_setvariables_for_up(void *dummy)
721{
722 mp_ncpus = 1;
723 mp_maxid = PCPU_GET(cpuid);
724 CPU_SETOF(mp_maxid, &all_cpus);
725 KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
726}
727SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
728 mp_setvariables_for_up, NULL);
729#endif /* SMP */
730
731void
732smp_no_rendevous_barrier(void *dummy)
733{
734#ifdef SMP
735 KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
736#endif
737}
738
739/*
740 * Wait for the specified idle threads to switch once. This ensures that even
741 * preempted threads have cycled through the switch function once,
742 * exiting their codepaths. This allows us to change global pointers
743 * with no other synchronization.
744 */
745int
746quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
747{
748 struct pcpu *pcpu;
749 u_int gen[MAXCPU];
750 int error;
751 int cpu;
752
753 error = 0;
754 for (cpu = 0; cpu <= mp_maxid; cpu++) {
755 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
756 continue;
757 pcpu = pcpu_find(cpu);
758 gen[cpu] = pcpu->pc_idlethread->td_generation;
759 }
760 for (cpu = 0; cpu <= mp_maxid; cpu++) {
761 if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
762 continue;
763 pcpu = pcpu_find(cpu);
764 thread_lock(curthread);
765 sched_bind(curthread, cpu);
766 thread_unlock(curthread);
767 while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
768 error = tsleep(quiesce_cpus, prio, wmesg, 1);
769 if (error != EWOULDBLOCK)
770 goto out;
771 error = 0;
772 }
773 }
774out:
775 thread_lock(curthread);
776 sched_unbind(curthread);
777 thread_unlock(curthread);
778
779 return (error);
780}
781
782int
783quiesce_all_cpus(const char *wmesg, int prio)
784{
785
786 return quiesce_cpus(all_cpus, wmesg, prio);
787}
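/*
 * Illustrative sketch (editor addition, not part of either revision):
 * the usage pattern described in the comment above quiesce_cpus();
 * publish a new global pointer, then wait for every CPU's idle thread
 * to switch once so that no CPU can still be executing through the old
 * value.  The example_* names are hypothetical.
 */
void (*example_hook)(void *);

static int
example_replace_hook(void (*example_new)(void *))
{

	example_hook = example_new;
	return (quiesce_all_cpus("exhook", 0));
}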