#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/smp.h>

#include "opt_sched.h"

#ifdef SMP
volatile cpuset_t stopped_cpus;
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

void (*cpustop_restartfunc)(void);
#endif
/* This is used in modules that need to work in both SMP and UP. */
cpuset_t all_cpus;

int mp_ncpus;
/* Export this for libkvm consumers. */
int mp_maxcpus = MAXCPU;

volatile int smp_started;
u_int mp_maxid;

static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
    "Kernel SMP");

SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
    "Max CPU ID.");

SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
    0, "Max number of CPUs that the system was compiled for.");

int smp_active = 0;	/* Are the APs allowed to run? */
SYSCTL_INT(_kern_smp, OID_AUTO, active, CTLFLAG_RW, &smp_active, 0,
    "Number of Auxiliary Processors (APs) that were successfully started");

int smp_disabled = 0;	/* Has SMP been disabled? */
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
    &smp_disabled, 0, "SMP has been disabled from the loader");
TUNABLE_INT("kern.smp.disabled", &smp_disabled);

int smp_cpus = 1;	/* How many CPUs are running. */
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
    "Number of CPUs online");

int smp_topology = 0;	/* Which topology we're using. */
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RD, &smp_topology, 0,
    "Topology override setting; 0 is default provided by hardware.");
TUNABLE_INT("kern.smp.topology", &smp_topology);
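
/*
 * Illustrative note (not part of the original file): the RDTUN knobs
 * above can only be set from the loader, e.g. in /boot/loader.conf:
 *
 *	kern.smp.disabled=1	# boot without starting the APs
 *	kern.smp.topology=3	# fake "dual core, shared L2"; see smp_topo()
 *
 * The read-only state can then be inspected from userland with
 * sysctl(8), e.g. "sysctl kern.smp.cpus kern.smp.maxid".
 */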

#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU. */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;

/*
 * Let the MD SMP code initialize mp_maxid very early if it can.
 */
static void
mp_setmaxid(void *dummy)
{
	cpu_mp_setmaxid();
}
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);

/*
 * Call the MD SMP initialization code.
 */
static void
mp_start(void *dummy)
{

	mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);

	/* Probe for MP hardware. */
	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
		mp_ncpus = 1;
		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
		return;
	}

	cpu_mp_start();
	printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
	    mp_ncpus);
	cpu_mp_announce();
}
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);

void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
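
/*
 * Example (a rough, illustrative sketch of a caller, not code from this
 * file): the signal-delivery path marks the signal pending via
 * signotify() and then pokes the target CPU only if the thread is
 * currently running there:
 *
 *	thread_lock(td);
 *	if (TD_IS_RUNNING(td) && td != curthread)
 *		forward_signal(td);
 *	thread_unlock(td);
 */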

/*
 * When called, the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_stop_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	static volatile u_int stopping_cpu = NOCPU;
	int i;
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
	    cpusetobj_strprint(cpusetbuf, &map), type);

	if (stopping_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&stopping_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (stopping_cpu != NOCPU)
				cpu_spinwait(); /* spin */

	/* Send the stop IPI to all CPUs in map. */
	ipi_selected(map, type);

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* Wait for each CPU in map to acknowledge that it has stopped. */
	i = 0;
	while (!CPU_SUBSET(cpus, &map)) {
		/* spin */
		cpu_spinwait();
		i++;
		if (i == 100000000) {
			printf("timeout stopping cpus\n");
			break;
		}
	}

	stopping_cpu = NOCPU;
	return (1);
}

int
stop_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP));
}

int
stop_cpus_hard(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_STOP_HARD));
}

#if defined(__amd64__) || defined(__i386__)
int
suspend_cpus(cpuset_t map)
{

	return (generic_stop_cpus(map, IPI_SUSPEND));
}
#endif

/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
static int
generic_restart_cpus(cpuset_t map, u_int type)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif
	volatile cpuset_t *cpus;

	KASSERT(
#if defined(__amd64__) || defined(__i386__)
	    type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
#else
	    type == IPI_STOP || type == IPI_STOP_HARD,
#endif
	    ("%s: invalid stop type", __func__));

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

#if defined(__amd64__) || defined(__i386__)
	if (type == IPI_SUSPEND)
		cpus = &suspended_cpus;
	else
#endif
		cpus = &stopped_cpus;

	/* Signal the other CPUs to restart. */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* Wait for each to clear its bit. */
	while (CPU_OVERLAP(cpus, &map))
		cpu_spinwait();

	return (1);
}

int
restart_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_STOP));
}

#if defined(__amd64__) || defined(__i386__)
int
resume_cpus(cpuset_t map)
{

	return (generic_restart_cpus(map, IPI_SUSPEND));
}
#endif
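
/*
 * Example (illustrative sketch only): a debugger-style consumer can park
 * every other CPU while it inspects machine state, then release them.
 * Per the comments above, the stop is usually given 'other_cpus' and the
 * restart is usually given 'stopped_cpus':
 *
 *	cpuset_t other_cpus;
 *
 *	other_cpus = all_cpus;
 *	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
 *	stop_cpus(other_cpus);
 *	(inspect state while the other CPUs spin in the stop handler)
 *	restart_cpus(stopped_cpus);
 */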

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
void
smp_rendezvous_action(void)
{
	struct thread *td;
	void *local_func_arg;
	void (*local_setup_func)(void*);
	void (*local_action_func)(void*);
	void (*local_teardown_func)(void*);
#ifdef INVARIANTS
	int owepreempt;
#endif

	/* Ensure we have up-to-date values. */
	atomic_add_acq_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < smp_rv_ncpus)
		cpu_spinwait();

	/* Fetch rendezvous parameters after acquire barrier. */
	local_func_arg = smp_rv_func_arg;
	local_setup_func = smp_rv_setup_func;
	local_action_func = smp_rv_action_func;
	local_teardown_func = smp_rv_teardown_func;

	/*
	 * Use a nested critical section to prevent any preemptions
	 * from occurring during a rendezvous action routine.
	 * Specifically, if a rendezvous handler is invoked via an IPI
	 * and the interrupted thread was in the critical_exit()
	 * function after setting td_critnest to 0 but before
	 * performing a deferred preemption, this routine can be
	 * invoked with td_critnest set to 0 and td_owepreempt true.
	 * In that case, a critical_exit() during the rendezvous
	 * action would trigger a preemption which is not permitted in
	 * a rendezvous action.  To fix this, wrap all of the
	 * rendezvous action handlers in a critical section.  We
	 * cannot use a regular critical section however as having
	 * critical_exit() preempt from this routine would also be
	 * problematic (the preemption must not occur before the IPI
	 * has been acknowledged via an EOI).  Instead, we
	 * intentionally ignore td_owepreempt when leaving the
	 * critical section.  This should be harmless because we do
	 * not permit rendezvous action routines to schedule threads,
	 * and thus td_owepreempt should never transition from 0 to 1
	 * during this routine.
	 */
	td = curthread;
	td->td_critnest++;
#ifdef INVARIANTS
	owepreempt = td->td_owepreempt;
#endif

	/*
	 * If requested, run a setup function before the main action
	 * function.  Ensure all CPUs have completed the setup
	 * function before moving on to the action function.
	 */
	if (local_setup_func != smp_no_rendevous_barrier) {
		if (local_setup_func != NULL)
			local_setup_func(local_func_arg);
		atomic_add_int(&smp_rv_waiters[1], 1);
		while (smp_rv_waiters[1] < smp_rv_ncpus)
			cpu_spinwait();
	}

	if (local_action_func != NULL)
		local_action_func(local_func_arg);

	if (local_teardown_func != smp_no_rendevous_barrier) {
		/*
		 * Signal that the main action has been completed.  If a
		 * full exit rendezvous is requested, then all CPUs will
		 * wait here until all CPUs have finished the main action.
		 */
		atomic_add_int(&smp_rv_waiters[2], 1);
		while (smp_rv_waiters[2] < smp_rv_ncpus)
			cpu_spinwait();

		if (local_teardown_func != NULL)
			local_teardown_func(local_func_arg);
	}

	/*
	 * Signal that the rendezvous is fully completed by this CPU.
	 * This means that no member of the smp_rv_* pseudo-structure will
	 * be accessed by this target CPU after this point; in particular,
	 * the memory pointed to by smp_rv_func_arg.
	 */
	atomic_add_int(&smp_rv_waiters[3], 1);

	td->td_critnest--;
	KASSERT(owepreempt == td->td_owepreempt,
	    ("rendezvous action changed td_owepreempt"));
}

void
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* See the comments in the !SMP case below. */
	if (!smp_started) {
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal the other processors, which will enter the IPI handler
	 * with interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Run the rendezvous locally if the current CPU is in the map. */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that the smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}

void
smp_rendezvous(void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{

	smp_rendezvous_cpus(all_cpus, setup_func, action_func,
	    teardown_func, arg);
}
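
/*
 * Example (illustrative sketch, not code from this file): run a function
 * once on every CPU and return only after all of them have executed it.
 * Passing smp_no_rendevous_barrier as the setup/teardown function skips
 * the corresponding barrier; passing NULL instead would keep the barrier
 * but run no function there.
 *
 *	static void
 *	example_count(void *arg)
 *	{
 *
 *		atomic_add_int((u_int *)arg, 1);
 *	}
 *
 *	u_int n = 0;
 *
 *	smp_rendezvous(smp_no_rendevous_barrier, example_count,
 *	    smp_no_rendevous_barrier, &n);
 *
 * At this point n equals mp_ncpus, because the master CPU waits on
 * smp_rv_waiters[3] until every CPU has finished its action.
 */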

static struct cpu_group group[MAXCPU];

struct cpu_group *
smp_topo(void)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	struct cpu_group *top;

	/*
	 * Check for a fake topology request for debugging purposes.
	 */
	switch (smp_topology) {
	case 1:
		/* Dual core with no sharing. */
		top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
		break;
	case 2:
		/* No topology, all cpus are equal. */
		top = smp_topo_none();
		break;
	case 3:
		/* Dual core with shared L2. */
		top = smp_topo_1level(CG_SHARE_L2, 2, 0);
		break;
	case 4:
		/* Quad core, shared L3 among each package, private L2. */
		top = smp_topo_1level(CG_SHARE_L3, 4, 0);
		break;
	case 5:
		/* Quad core, 2 dual-core parts on each package share L2. */
		top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
		break;
	case 6:
		/* Single-core 2xHTT. */
		top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
		break;
	case 7:
		/* Quad core with a shared L3, 8 threads sharing L2. */
		top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
		    CG_FLAG_SMT);
		break;
	default:
		/* Default, ask the system what it wants. */
		top = cpu_topo();
		break;
	}
	/*
	 * Verify the returned topology.
	 */
	if (top->cg_count != mp_ncpus)
		panic("Built bad topology at %p.  CPU count %d != %d",
		    top, top->cg_count, mp_ncpus);
	if (CPU_CMP(&top->cg_mask, &all_cpus))
		panic("Built bad topology at %p.  CPU mask (%s) != (%s)",
		    top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
		    cpusetobj_strprint(cpusetbuf2, &all_cpus));
	return (top);
}

struct cpu_group *
smp_topo_none(void)
{
	struct cpu_group *top;

	top = &group[0];
	top->cg_parent = NULL;
	top->cg_child = NULL;
	top->cg_mask = all_cpus;
	top->cg_count = mp_ncpus;
	top->cg_children = 0;
	top->cg_level = CG_SHARE_NONE;
	top->cg_flags = 0;

	return (top);
}

static int
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
    int count, int flags, int start)
{
	char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
	cpuset_t mask;
	int i;

	CPU_ZERO(&mask);
	for (i = 0; i < count; i++, start++)
		CPU_SET(start, &mask);
	child->cg_parent = parent;
	child->cg_child = NULL;
	child->cg_children = 0;
	child->cg_level = share;
	child->cg_count = count;
	child->cg_flags = flags;
	child->cg_mask = mask;
	parent->cg_children++;
	for (; parent != NULL; parent = parent->cg_parent) {
		if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
			panic("Duplicate children in %p.  mask (%s) child (%s)",
			    parent,
			    cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
			    cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
		CPU_OR(&parent->cg_mask, &child->cg_mask);
		parent->cg_count += child->cg_count;
	}

	return (start);
}

struct cpu_group *
smp_topo_1level(int share, int count, int flags)
{
	struct cpu_group *child;
	struct cpu_group *top;
	int packages;
	int cpu;
	int i;

	cpu = 0;
	top = &group[0];
	packages = mp_ncpus / count;
	top->cg_child = child = &group[1];
	top->cg_level = CG_SHARE_NONE;
	for (i = 0; i < packages; i++, child++)
		cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
	return (top);
}

struct cpu_group *
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
    int l1flags)
{
	struct cpu_group *top;
	struct cpu_group *l1g;
	struct cpu_group *l2g;
	int cpu;
	int i;
	int j;

	cpu = 0;
	top = &group[0];
	l2g = &group[1];
	top->cg_child = l2g;
	top->cg_level = CG_SHARE_NONE;
	top->cg_children = mp_ncpus / (l2count * l1count);
	l1g = l2g + top->cg_children;
	for (i = 0; i < top->cg_children; i++, l2g++) {
		l2g->cg_parent = top;
		l2g->cg_child = l1g;
		l2g->cg_level = l2share;
		for (j = 0; j < l2count; j++, l1g++)
			cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
			    l1flags, cpu);
	}
	return (top);
}
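
/*
 * Illustrative example (not part of the original file): on an 8-CPU
 * machine,
 *
 *	smp_topo_2level(CG_SHARE_L3, 2, CG_SHARE_L2, 4, 0)
 *
 * builds the tree below.  top->cg_children = 8 / (2 * 4) = 1, so the
 * root has a single child modelling a package-wide L3, which in turn
 * has two leaves, each an L2 group of four CPUs:
 *
 *	root (CG_SHARE_NONE, CPUs 0-7)
 *	  L3 group (CG_SHARE_L3, CPUs 0-7)
 *	    L2 leaf (CG_SHARE_L2, CPUs 0-3)
 *	    L2 leaf (CG_SHARE_L2, CPUs 4-7)
 */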

struct cpu_group *
smp_topo_find(struct cpu_group *top, int cpu)
{
	struct cpu_group *cg;
	cpuset_t mask;
	int children;
	int i;

	CPU_SETOF(cpu, &mask);
	cg = top;
	for (;;) {
		if (!CPU_OVERLAP(&cg->cg_mask, &mask))
			return (NULL);
		if (cg->cg_children == 0)
			return (cg);
		children = cg->cg_children;
		for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
			if (CPU_OVERLAP(&cg->cg_mask, &mask))
				break;
	}
	return (NULL);
}
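
/*
 * Example (illustrative sketch only): finding the smallest sharing group
 * that contains the executing CPU, e.g. to prefer migration targets that
 * share a cache with it:
 *
 *	struct cpu_group *cg;
 *
 *	cg = smp_topo_find(smp_topo(), PCPU_GET(cpuid));
 *	if (cg != NULL)
 *		(cg->cg_mask now holds this CPU's closest siblings)
 */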

#else /* !SMP */

void
smp_rendezvous_cpus(cpuset_t map,
	void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{
	/*
	 * In the !SMP case we just need to ensure the same initial conditions
	 * as the SMP case.
	 */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

void
smp_rendezvous(void (*setup_func)(void *),
	void (*action_func)(void *),
	void (*teardown_func)(void *),
	void *arg)
{

	/* See the comments in smp_rendezvous_cpus() above. */
	spinlock_enter();
	if (setup_func != NULL)
		setup_func(arg);
	if (action_func != NULL)
		action_func(arg);
	if (teardown_func != NULL)
		teardown_func(arg);
	spinlock_exit();
}

/*
 * Provide dummy SMP support for UP kernels.  Modules that need to use SMP
 * APIs will still work using this dummy support.
 */
static void
mp_setvariables_for_up(void *dummy)
{
	mp_ncpus = 1;
	mp_maxid = PCPU_GET(cpuid);
	CPU_SETOF(mp_maxid, &all_cpus);
	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
}
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
    mp_setvariables_for_up, NULL);
#endif /* SMP */

void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started), ("smp_no_rendevous called and smp is started"));
#endif
}

/*
 * Wait for the specified idle threads to switch once.  This ensures that
 * even preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}

int
quiesce_all_cpus(const char *wmesg, int prio)
{

	return (quiesce_cpus(all_cpus, wmesg, prio));
}
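
/*
 * Example (illustrative sketch only): a subsystem that swaps a global
 * hook can quiesce all CPUs before freeing the old target.  old_hook,
 * new_hook, and the "quiesce" wmesg are hypothetical; a prio of 0 leaves
 * the sleeping thread's priority unchanged.
 *
 *	hook_fn = new_hook;
 *	if (quiesce_all_cpus("quiesce", 0) == 0)
 *		(every CPU has switched at least once, so no thread can
 *		 still be executing through old_hook; free it)
 */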