/* mp_machdep.c revision 72358 */
1/*- 2 * Copyright (c) 2000 Doug Rabson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 * 26 * $FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 72358 2001-02-11 10:44:09Z markm $ 27 */ 28 29#include <sys/param.h> 30#include <sys/systm.h> 31#include <sys/ktr.h> 32#include <sys/proc.h> 33#include <sys/lock.h> 34#include <sys/malloc.h> 35#include <sys/mutex.h> 36#include <sys/kernel.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41#include <vm/vm_map.h> 42#include <sys/user.h> 43#include <sys/dkstat.h> 44 45#include <machine/smp.h> 46#include <machine/atomic.h> 47#include <machine/globaldata.h> 48#include <machine/pmap.h> 49#include <machine/rpb.h> 50#include <machine/clock.h> 51 52volatile u_int stopped_cpus; 53volatile u_int started_cpus; 54volatile u_int checkstate_probed_cpus; 55volatile u_int checkstate_need_ast; 56volatile u_int checkstate_pending_ast; 57struct proc* checkstate_curproc[MAXCPU]; 58int checkstate_cpustate[MAXCPU]; 59u_long checkstate_pc[MAXCPU]; 60volatile u_int resched_cpus; 61void (*cpustop_restartfunc) __P((void)); 62int mp_ncpus; 63 64int smp_started; 65int boot_cpu_id; 66u_int32_t all_cpus; 67 68static struct globaldata *cpuno_to_globaldata[MAXCPU]; 69 70int smp_active = 0; /* are the APs allowed to run? */ 71SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 72 73/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? 
*/ 74int forward_irq_enabled = 1; 75SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW, 76 &forward_irq_enabled, 0, ""); 77 78/* Enable forwarding of a signal to a process running on a different CPU */ 79static int forward_signal_enabled = 1; 80SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, 81 &forward_signal_enabled, 0, ""); 82 83/* Enable forwarding of roundrobin to all other cpus */ 84static int forward_roundrobin_enabled = 1; 85SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW, 86 &forward_roundrobin_enabled, 0, ""); 87 88/* 89 * Communicate with a console running on a secondary processor. 90 * Return 1 on failure. 91 */ 92static int 93smp_send_secondary_command(const char *command, int cpuno) 94{ 95 u_int64_t mask; 96 97 mask = 1L << cpuno; 98 struct pcs *cpu = LOCATE_PCS(hwrpb, cpuno); 99 int i, len; 100 101 /* 102 * Sanity check. 103 */ 104 len = strlen(command); 105 if (len > sizeof(cpu->pcs_buffer.rxbuf)) { 106 printf("smp_send_secondary_command: command '%s' too long\n", 107 command); 108 return 0; 109 } 110 111 /* 112 * Wait for the rx bit to clear. 113 */ 114 for (i = 0; i < 100000; i++) { 115 if (!(hwrpb->rpb_rxrdy & mask)) 116 break; 117 DELAY(10); 118 } 119 if (hwrpb->rpb_rxrdy & mask) 120 return 0; 121 122 /* 123 * Write the command into the processor's buffer. 124 */ 125 bcopy(command, cpu->pcs_buffer.rxbuf, len); 126 cpu->pcs_buffer.rxlen = len; 127 128 /* 129 * Set the bit in the rxrdy mask and let the secondary try to 130 * handle the command. 131 */ 132 atomic_set_64(&hwrpb->rpb_rxrdy, mask); 133 134 /* 135 * Wait for the rx bit to clear. 
136 */ 137 for (i = 0; i < 100000; i++) { 138 if (!(hwrpb->rpb_rxrdy & mask)) 139 break; 140 DELAY(10); 141 } 142 if (hwrpb->rpb_rxrdy & mask) 143 return 0; 144 145 return 1; 146} 147 148void 149smp_init_secondary(void) 150{ 151 152 mtx_lock(&Giant); 153 154 printf("smp_init_secondary: called\n"); 155 CTR0(KTR_SMP, "smp_init_secondary"); 156 157 /* 158 * Add to mask. 159 */ 160 smp_started = 1; 161 if (PCPU_GET(cpuno) + 1 > mp_ncpus) 162 mp_ncpus = PCPU_GET(cpuno) + 1; 163 spl0(); 164 165 mtx_unlock(&Giant); 166} 167 168extern void smp_init_secondary_glue(void); 169 170static int 171smp_start_secondary(int cpuno) 172{ 173 174 printf("smp_start_secondary: starting cpu %d\n", cpuno); 175 176 sz = round_page(UPAGES * PAGE_SIZE); 177 globaldata = malloc(sz, M_TEMP, M_NOWAIT); 178 if (!globaldata) { 179 printf("smp_start_secondary: can't allocate memory\n"); 180 return 0; 181 } 182 183 globaldata_init(globaldata, cpuno, sz); 184 185 /* 186 * Fire it up and hope for the best. 187 */ 188 if (!smp_send_secondary_command("START\r\n", cpuno)) { 189 printf("smp_init_secondary: can't send START command\n"); 190 free(globaldata, M_TEMP); 191 return 0; 192 } 193 194 /* 195 * It worked (I think). 196 */ 197 /* if (bootverbose) */ 198 printf("smp_init_secondary: cpu %d started\n", cpuno); 199 200 return 1; 201} 202 203/* 204 * Initialise a struct globaldata. 
205 */ 206void 207globaldata_init(struct globaldata *globaldata, int cpuno, size_t sz) 208{ 209 210 bzero(globaldata, sz); 211 globaldata->gd_idlepcbphys = vtophys((vm_offset_t) &globaldata->gd_idlepcb); 212 globaldata->gd_idlepcb.apcb_ksp = (u_int64_t) 213 ((caddr_t) globaldata + sz - sizeof(struct trapframe)); 214 globaldata->gd_idlepcb.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr; 215 globaldata->gd_cpuno = cpuno; 216 globaldata->gd_other_cpus = all_cpus & ~(1 << cpuno); 217 globaldata->gd_next_asn = 0; 218 globaldata->gd_current_asngen = 1; 219 globaldata->gd_cpuid = cpuno; 220 cpuno_to_globaldata[cpuno] = globaldata; 221} 222 223struct globaldata * 224globaldata_find(int cpuno) 225{ 226 227 return cpuno_to_globaldata[cpuno]; 228} 229 230/* Other stuff */ 231 232/* lock around the MP rendezvous */ 233static struct mtx smp_rv_mtx; 234 235/* only 1 CPU can panic at a time :) */ 236struct mtx panic_mtx; 237 238static void 239init_locks(void) 240{ 241 242 mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN); 243 mtx_init(&panic_mtx, "panic", MTX_DEF); 244} 245 246void 247mp_start() 248{ 249} 250 251void 252mp_announce() 253{ 254} 255 256void 257smp_invltlb() 258{ 259} 260 261 262/* 263 * When called the executing CPU will send an IPI to all other CPUs 264 * requesting that they halt execution. 265 * 266 * Usually (but not necessarily) called with 'other_cpus' as its arg. 267 * 268 * - Signals all CPUs in map to stop. 269 * - Waits for each to stop. 270 * 271 * Returns: 272 * -1: error 273 * 0: NA 274 * 1: ok 275 * 276 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 277 * from executing at same time. 
278 */ 279int 280stop_cpus(u_int map) 281{ 282 int i; 283 284 if (!smp_started) 285 return 0; 286 287 CTR1(KTR_SMP, "stop_cpus(%x)", map); 288 289 i = 0; 290 while ((stopped_cpus & map) != map) { 291 /* spin */ 292 i++; 293 if (i == 100000) { 294 printf("timeout stopping cpus\n"); 295 break; 296 } 297 } 298 299 printf("stopped_cpus=%x\n", stopped_cpus); 300 301 return 1; 302} 303 304 305/* 306 * Called by a CPU to restart stopped CPUs. 307 * 308 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 309 * 310 * - Signals all CPUs in map to restart. 311 * - Waits for each to restart. 312 * 313 * Returns: 314 * -1: error 315 * 0: NA 316 * 1: ok 317 */ 318int 319restart_cpus(u_int map) 320{ 321 322 if (!smp_started) 323 return 0; 324 325 CTR1(KTR_SMP, "restart_cpus(%x)", map); 326 327 started_cpus = map; /* signal other cpus to restart */ 328 329 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */ 330 ; 331 332 return 1; 333} 334 335/* 336 * All-CPU rendezvous. CPUs are signalled, all execute the setup function 337 * (if specified), rendezvous, execute the action function (if specified), 338 * rendezvous again, execute the teardown function (if specified), and then 339 * resume. 340 * 341 * Note that the supplied external functions _must_ be reentrant and aware 342 * that they are running in parallel and in an unknown lock context. 
343 */ 344static void (*smp_rv_setup_func)(void *arg); 345static void (*smp_rv_action_func)(void *arg); 346static void (*smp_rv_teardown_func)(void *arg); 347static void *smp_rv_func_arg; 348static volatile int smp_rv_waiters[2]; 349 350void 351smp_rendezvous_action(void) 352{ 353 354 /* setup function */ 355 if (smp_rv_setup_func != NULL) 356 smp_rv_setup_func(smp_rv_func_arg); 357 /* spin on entry rendezvous */ 358 atomic_add_int(&smp_rv_waiters[0], 1); 359 while (smp_rv_waiters[0] < mp_ncpus) 360 ; 361 /* action function */ 362 if (smp_rv_action_func != NULL) 363 smp_rv_action_func(smp_rv_func_arg); 364 /* spin on exit rendezvous */ 365 atomic_add_int(&smp_rv_waiters[1], 1); 366 while (smp_rv_waiters[1] < mp_ncpus) 367 ; 368 /* teardown function */ 369 if (smp_rv_teardown_func != NULL) 370 smp_rv_teardown_func(smp_rv_func_arg); 371} 372 373void 374smp_rendezvous(void (* setup_func)(void *), 375 void (* action_func)(void *), 376 void (* teardown_func)(void *), 377 void *arg) 378{ 379 380 /* obtain rendezvous lock */ 381 mtx_lock_spin(&smp_rv_mtx); 382 383 /* set static function pointers */ 384 smp_rv_setup_func = setup_func; 385 smp_rv_action_func = action_func; 386 smp_rv_teardown_func = teardown_func; 387 smp_rv_func_arg = arg; 388 smp_rv_waiters[0] = 0; 389 smp_rv_waiters[1] = 0; 390 391 /* call executor function */ 392 smp_rendezvous_action(); 393 394 /* release lock */ 395 mtx_unlock_spin(&smp_rv_mtx); 396} 397 398static u_int64_t 399atomic_readandclear(u_int64_t* p) 400{ 401 u_int64_t v, temp; 402 403 __asm__ __volatile__ ( 404 : "=&r"(v), "=&r"(temp), "=m" (*p) 405 : "m"(*p) 406 : "memory"); 407 return v; 408} 409