mp_startup.c revision 3434:5142e1d7d0bc
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sysmacros.h>
#include <sys/prom_plat.h>
#include <sys/prom_debug.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <sys/machsystm.h>
#include <sys/callb.h>
#include <sys/cpu_module.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/traptrace.h>
#ifdef TRAPTRACE
#include <sys/bootconf.h>
#endif /* TRAPTRACE */
#include <sys/cpu_sgnblk_defs.h>

extern void cpu_intrq_setup(struct cpu *);
extern void cpu_intrq_cleanup(struct cpu *);
extern void cpu_intrq_register(struct cpu *);

struct cpu	*cpus;	/* pointer to other cpus; dynamically allocated */
struct cpu	*cpu[NCPU];	/* pointers to all CPUs */
uint64_t	cpu_pa[NCPU];	/* physical addresses of all cpu structures */
cpu_core_t	cpu_core[NCPU];	/* cpu_core structures */

#ifdef TRAPTRACE
caddr_t	ttrace_buf;	/* bop alloced traptrace for all cpus except 0 */
#endif /* TRAPTRACE */

/* bit mask of cpus ready for x-calls, protected by cpu_lock */
cpuset_t cpu_ready_set;

/* bit mask used to communicate with cpus during bringup */
static cpuset_t proxy_ready_set;

static void	slave_startup(void);

/*
 * Defined in $KARCH/os/mach_mp_startup.c
 */
#pragma weak init_cpu_info

/*
 * Amount of time (in milliseconds) we should wait before giving up on CPU
 * initialization and assuming that the CPU we're trying to wake up is dead
 * or out of control.
 */
#define	CPU_WAKEUP_GRACE_MSEC 1000

extern hrtime_t nosteal_nsec;
extern void cmp_set_nosteal_interval(void);

#ifdef	TRAPTRACE
/*
 * This function bop allocs traptrace buffers for all cpus
 * other than boot cpu.
 */
caddr_t
trap_trace_alloc(caddr_t base)
{
	caddr_t	vaddr;
	extern int max_ncpus;

	if (max_ncpus == 1) {
		return (base);
	}

	if ((vaddr = (caddr_t)BOP_ALLOC(bootops, base, (TRAP_TSIZE *
	    (max_ncpus - 1)), TRAP_TSIZE)) == NULL) {
		panic("trap_trace_alloc: can't bop alloc");
	}
	ttrace_buf = vaddr;
	PRM_DEBUG(ttrace_buf);
	return (vaddr + (TRAP_TSIZE * (max_ncpus - 1)));
}
#endif	/* TRAPTRACE */

/*
 * common slave cpu initialization code
 */
void
common_startup_init(cpu_t *cp, int cpuid)
{
	kthread_id_t tp;
	sfmmu_t *sfmmup;
	caddr_t	sp;

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 */
	tp = thread_create(NULL, 0, slave_startup, NULL, 0, &p0,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	sfmmup = astosfmmu(&kas);
	CPUSET_ADD(sfmmup->sfmmu_cpusran, cpuid);

	/*
	 * Setup thread to start in slave_startup.
	 */
	sp = tp->t_stk;
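	/*
	 * The -8 bias accounts for the SPARC return convention: the
	 * thread is resumed via ret (jmpl %i7 + 8), so execution
	 * begins exactly at slave_startup.
	 */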
	tp->t_pc = (uintptr_t)slave_startup - 8;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);
	cp->cpu_startup_thread = tp;
}

/*
 * Parametric flag setting functions.  These routines set the cpu
 * state just prior to releasing the slave cpu.
 */
void
cold_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_ENABLE | CPU_EXISTS;
	cpu_add_active(cp);
	/*
	 * Add CPU_READY after the cpu_add_active() call
	 * to avoid pausing cp.
	 */
	cp->cpu_flags |= CPU_READY;		/* ready */
	cpu_set_state(cp);
}

static void
warm_flag_set(int cpuid)
{
	cpu_t *cp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * warm start activates cpus into the OFFLINE state
	 */
	cp = cpu[cpuid];
	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS |
	    CPU_OFFLINE | CPU_QUIESCED;
	cpu_set_state(cp);
}

/*
 * Internal cpu startup sequencer
 * The sequence is as follows:
 *
 * MASTER	SLAVE
 * -------	----------
 * assume the kernel data is initialized
 * clear the proxy bit
 * start the slave cpu
 * wait for the slave cpu to set the proxy
 *
 *		the slave runs slave_startup and then sets the proxy
 *		the slave waits for the master to add slave to the ready set
 *
 * the master finishes the initialization and
 * adds the slave to the ready set
 *
 *		the slave exits the startup thread and is running
 */
void
start_cpu(int cpuid, void(*flag_func)(int))
{
	extern void cpu_startup(int);
	int timeout;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Before we begin the dance, tell DTrace that we're about to start
	 * a CPU.
	 */
	if (dtrace_cpustart_init != NULL)
		(*dtrace_cpustart_init)();

	/* start the slave cpu */
	CPUSET_DEL(proxy_ready_set, cpuid);
	if (prom_test("SUNW,start-cpu-by-cpuid") == 0) {
		(void) prom_startcpu_bycpuid(cpuid, (caddr_t)&cpu_startup,
		    cpuid);
	} else {
		/* "by-cpuid" interface didn't exist.  Do it the old way */
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		ASSERT(nodeid != (pnode_t)0);
		(void) prom_startcpu(nodeid, (caddr_t)&cpu_startup, cpuid);
	}

	/* wait for the slave cpu to check in. */
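	/* it gets CPU_WAKEUP_GRACE_MSEC ms; each pass below delays 1000us */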
	for (timeout = CPU_WAKEUP_GRACE_MSEC; timeout; timeout--) {
		if (CPU_IN_SET(proxy_ready_set, cpuid))
			break;
		DELAY(1000);
	}
	if (timeout == 0) {
		panic("cpu%d failed to start (2)", cpuid);
	}

	/*
	 * The slave has started; we can tell DTrace that it's safe again.
	 */
	if (dtrace_cpustart_fini != NULL)
		(*dtrace_cpustart_fini)();

	/* run the master side of stick synchronization for the slave cpu */
	sticksync_master();

	/*
	 * Deal with the cpu flags in a phase-specific manner.  For various
	 * reasons, this needs to run after the slave is checked in but
	 * before the slave is released.
	 */
	(*flag_func)(cpuid);

	/* release the slave */
	CPUSET_ADD(cpu_ready_set, cpuid);
}

#ifdef TRAPTRACE
int trap_tr0_inuse = 1;	/* it is always used on the boot cpu */
int trap_trace_inuse[NCPU];
#endif /* TRAPTRACE */

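/*
 * While a cpu structure sits on the free list it is not on any cpu
 * list, so its cpu_prev link is unused; reuse it as the free-list
 * link (see setup_cpu_common() and cleanup_cpu_common()).
 */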
#define	cpu_next_free	cpu_prev

/*
 * Routine to set up a CPU to prepare for starting it up.
 */
void
setup_cpu_common(int cpuid)
{
	struct cpu *cp = NULL;
	kthread_id_t tp;
#ifdef TRAPTRACE
	int tt_index;
	TRAP_TRACE_CTL	*ctlp;
	caddr_t	newbuf;
#endif /* TRAPTRACE */

	extern void idle();

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] == NULL);

	ASSERT(ncpus <= max_ncpus);

#ifdef TRAPTRACE
	/*
	 * allocate a traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	if (!trap_tr0_inuse) {
		trap_tr0_inuse = 1;
		newbuf = trap_tr0;
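		/* -1 marks use of the boot cpu's trap_tr0, not ttrace_buf */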
		tt_index = -1;
	} else {
		for (tt_index = 0; tt_index < (max_ncpus - 1); tt_index++)
			if (!trap_trace_inuse[tt_index])
				break;
		ASSERT(tt_index < max_ncpus - 1);
		trap_trace_inuse[tt_index] = 1;
		newbuf = (caddr_t)(ttrace_buf + (tt_index * TRAP_TSIZE));
	}
	ctlp->d.vaddr_base = newbuf;
	ctlp->d.offset = ctlp->d.last_offset = 0;
	ctlp->d.limit = trap_trace_bufsize;
	ctlp->d.paddr_base = va_to_pa(newbuf);
	ASSERT(ctlp->d.paddr_base != (uint64_t)-1);
#endif /* TRAPTRACE */
	/*
	 * initialize hv traptrace buffer for this CPU
	 */
	mach_htraptrace_setup(cpuid);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	if (cpu0.cpu_flags == 0) {
		cp = &cpu0;
	} else {
		/*
		 * When dynamically allocating cpu structs,
		 * cpus is used as a pointer to a list of freed
		 * cpu structs.
		 */
		if (cpus) {
			/* grab the first cpu struct on the free list */
			cp = cpus;
			if (cp->cpu_next_free)
				cpus = cp->cpu_next_free;
			else
				cpus = NULL;
		}
	}

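	/*
	 * No freed cpu struct was available to reuse; allocate a fresh,
	 * suitably aligned one.
	 */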
	if (cp == NULL)
		cp = vmem_xalloc(static_alloc_arena, CPU_ALLOC_SIZE,
		    CPU_ALLOC_SIZE, 0, 0, NULL, NULL, VM_SLEEP);

	bzero(cp, sizeof (*cp));

	cp->cpu_id = cpuid;
	cp->cpu_self = cp;

	/*
	 * Initialize ptl1_panic stack
	 */
	ptl1_init_cpu(cp);

	/*
	 * Initialize the dispatcher for this CPU.
	 */
	disp_cpu_init(cp);

	cpu_vm_data_init(cp);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, 0, idle, NULL, 0, &p0, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Registering a thread in the callback table is usually
	 * done in the initialization code of the thread. In this
	 * case, we do it right after thread creation to avoid
	 * blocking the idle thread while it registers itself. It also
	 * avoids the possibility of reregistration in case a CPU
	 * restarts its idle thread.
	 */
	CALLB_CPR_INIT_SAFE(tp, "idle");

	init_cpu_info(cp);

	/*
	 * Initialize the interrupt threads for this CPU
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	/*
	 * Add CPU to list of available CPUs.
	 * It'll be on the active list after it is started.
	 */
	cpu_add_unit(cp);

	/*
	 * Allocate and init cpu module private data structures,
	 * including scrubber.
	 */
	cpu_init_private(cp);

	/*
	 * Initialize the CPU's physical ID cache and processor groups.
	 */
	pghw_physid_create(cp);
	pg_cpu_init(cp);

	if (nosteal_nsec == -1)
		cmp_set_nosteal_interval();

	cpu_intrq_setup(cp);

	/*
	 * Initialize MMU context domain information.
	 */
	sfmmu_cpu_init(cp);
}

/*
 * Routine to clean up a CPU after shutting it down.
 */
int
cleanup_cpu_common(int cpuid)
{
	struct cpu *cp;
#ifdef TRAPTRACE
	int i;
	TRAP_TRACE_CTL	*ctlp;
	caddr_t	newbuf;
#endif /* TRAPTRACE */

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu[cpuid] != NULL);

	cp = cpu[cpuid];

	/* Free cpu module private data structures, including scrubber. */
	cpu_uninit_private(cp);

	/* Free cpu ID string and brand string. */
	kmem_free(cp->cpu_idstr, strlen(cp->cpu_idstr) + 1);
	kmem_free(cp->cpu_brandstr, strlen(cp->cpu_brandstr) + 1);

	cpu_vm_data_destroy(cp);

	/*
	 * Remove CPU from list of available CPUs.
	 */
	cpu_del_unit(cpuid);

	/*
	 * Clean any machine specific interrupt states.
	 */
	cpu_intrq_cleanup(cp);

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-cpu threads: its idle thread, its pause thread,
	 * and its interrupt threads.  Clean these up.
	 */
	cpu_destroy_bound_threads(cp);

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp, cp->cpu_intr_stack);

	/*
	 * Free hv traptrace buffer for this CPU.
	 */
	mach_htraptrace_cleanup(cpuid);
#ifdef TRAPTRACE
	/*
	 * Free the traptrace buffer for this CPU.
	 */
	ctlp = &trap_trace_ctl[cpuid];
	newbuf = ctlp->d.vaddr_base;
	i = (newbuf - ttrace_buf) / (TRAP_TSIZE);
	if (((newbuf - ttrace_buf) % (TRAP_TSIZE) == 0) &&
	    ((i >= 0) && (i < (max_ncpus - 1)))) {
		/*
		 * This CPU got its trap trace buffer from the
		 * boot-alloc'd bunch of them.
		 */
		trap_trace_inuse[i] = 0;
		bzero(newbuf, (TRAP_TSIZE));
	} else if (newbuf == trap_tr0) {
		trap_tr0_inuse = 0;
		bzero(trap_tr0, (TRAP_TSIZE));
	} else {
		cmn_err(CE_WARN, "failed to free trap trace buffer from cpu%d",
		    cpuid);
	}
	bzero(ctlp, sizeof (*ctlp));
#endif /* TRAPTRACE */

	/*
	 * There is a race condition with mutex_vector_enter() which
	 * caches a cpu pointer. The race is detected by checking cpu_next.
	 */
	disp_cpu_fini(cp);
	cpu_pa[cpuid] = 0;
	sfmmu_cpu_cleanup(cp);
	bzero(cp, sizeof (*cp));

	/*
	 * Place the freed cpu structure on the list of freed cpus.
	 */
	if (cp != &cpu0) {
		if (cpus) {
			cp->cpu_next_free = cpus;
			cpus = cp;
		} else {
			cpus = cp;
		}
	}

	return (0);
}

/*
 * This routine is used to start a previously powered off processor.
 * Note that restarted cpus are initialized into the offline state.
 */
void
restart_other_cpu(int cpuid)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t	sp;
	extern void idle();

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpuid < NCPU && cpu[cpuid] != NULL);

	/*
	 * Obtain pointer to the appropriate cpu structure.
	 */
	cp = cpu[cpuid];

	common_startup_init(cp, cpuid);

	/*
	 * The idle thread's t_lock is held when the thread is suspended.
	 * Manually unlock it here so that the suspended idle thread can
	 * be resumed, and adjust its PC so that it retries the idle loop.
	 */
	cp->cpu_intr_actv = 0;	/* clear the value from previous life */
	cp->cpu_m.mutex_ready = 0; /* we are not ready yet */
	lock_clear(&cp->cpu_idle_thread->t_lock);
	tp = cp->cpu_idle_thread;

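	/* rewind the idle thread; same -8 PC bias as common_startup_init() */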
	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)((struct rwindow *)sp - 1) - STACK_BIAS;
	tp->t_pc = (uintptr_t)idle - 8;

	/*
	 * restart the cpu now
	 */
	promsafe_pause_cpus();
	start_cpu(cpuid, warm_flag_set);
	start_cpus();

	/* call cmn_err outside pause_cpus/start_cpus to avoid deadlock */
	cmn_err(CE_CONT, "!cpu%d initialization complete - restarted\n",
	    cpuid);
}

/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu	*cp = CPU;
	ushort_t	original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * The slave will wait here forever -- assuming that the master
	 * will get back to us.  If it doesn't, we've got bigger problems
	 * than a master not replying to this slave.
	 * The small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * Park the slave thread in a safe/quiet state and wait for the
	 * master to finish configuring this CPU before proceeding to
	 * thread_exit().  The read goes through a volatile pointer so
	 * that each pass rereads cpu_flags from memory.
	 */
	while ((*(volatile ushort_t *)&cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}

extern struct cpu	*cpu[NCPU];	/* pointers to all CPUs */

extern void setup_cpu_common(int);
extern void common_startup_init(cpu_t *, int);
extern void start_cpu(int, void(*func)(int));
extern void cold_flag_set(int cpuid);

/*
 * cpu_bringup_set is a tunable (via /etc/system, debugger, etc.) that
 * can be used during debugging to control which processors are brought
 * online at boot time.  The variable represents a bitmap of the id's
 * of the processors that will be brought online.  The initialization
 * of this variable depends on the type of cpuset_t, which varies
 * depending on the number of processors supported (see cpuvar.h).
 */
cpuset_t cpu_bringup_set;
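/*
 * For example, on a configuration where cpuset_t is a single word,
 *	set cpu_bringup_set = 0x3
 * in /etc/system would restrict bringup to cpus 0 and 1.  (Illustrative
 * only; the exact syntax depends on the cpuset_t representation.)
 */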

/*
 * Generic start-all cpus entry.  Typically used during cold initialization.
 * Note that cold start cpus are initialized into the online state.
 */
/*ARGSUSED*/
void
start_other_cpus(int flag)
{
	int cpuid;
	extern void idlestop_init(void);
	int bootcpu;

	/*
	 * Check if cpu_bringup_set has been explicitly set before
	 * initializing it.
	 */
	if (CPUSET_ISNULL(cpu_bringup_set)) {
#ifdef MPSAS
		/* just CPU 0 */
		CPUSET_ADD(cpu_bringup_set, 0);
#else
		CPUSET_ALL(cpu_bringup_set);
#endif
	}

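	/* cpu_feature_init() is weak-linked; its address is NULL if absent */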
	if (&cpu_feature_init)
		cpu_feature_init();

	/*
	 * Initialize CPC.
	 */
	kcpc_hw_init();

	mutex_enter(&cpu_lock);

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

	/*
	 * Initialize CPU 0 cpu module private data area, including scrubber.
	 */
	cpu_init_private(CPU);

	/*
	 * Perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();
	xc_init();		/* initialize processor crosscalls */
	idlestop_init();

	if (!use_mp) {
		mutex_exit(&cpu_lock);
		cmn_err(CE_CONT, "?***** Not in MP mode\n");
		return;
	}
	/*
	 * Identify the boot cpu: it is already running, so the launch
	 * loop below must skip it.
	 */
	bootcpu = getprocessorid();

	/*
	 * launch all the slave cpus now
	 */
	for (cpuid = 0; cpuid < NCPU; cpuid++) {
		pnode_t nodeid = cpunodes[cpuid].nodeid;

		if (nodeid == (pnode_t)0)
			continue;

		if (cpuid == bootcpu) {
			if (!CPU_IN_SET(cpu_bringup_set, cpuid)) {
				cmn_err(CE_WARN, "boot cpu not a member "
				    "of cpu_bringup_set, adding it");
				CPUSET_ADD(cpu_bringup_set, cpuid);
			}
			continue;
		}
		if (!CPU_IN_SET(cpu_bringup_set, cpuid))
			continue;

		ASSERT(cpu[cpuid] == NULL);

		setup_cpu_common(cpuid);

		common_startup_init(cpu[cpuid], cpuid);

		start_cpu(cpuid, cold_flag_set);
		/*
		 * Because slave_startup() gets fired off after init()
		 * starts, we can't use the '?' trick to do 'boot -v'
		 * printing - so we always direct the 'cpu .. online'
		 * messages to the log.
		 */
		cmn_err(CE_CONT, "!cpu%d initialization complete - online\n",
		    cpuid);

		/*
		 * XXX: register_cpu_setup() callbacks should be called here
		 * with a new setup code, CPU_BOOT (or something).
		 */
		if (dtrace_cpu_init != NULL)
			(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * Since all the cpus are online now, redistribute interrupts to them.
	 */
	intr_redist_all_cpus();

	mutex_exit(&cpu_lock);

	/*
	 * Start the Ecache scrubber.  Must be done after all calls to
	 * cpu_init_private for every cpu (including CPU 0).
	 */
	cpu_init_cache_scrub();

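	/* cpu_mp_init() is another weak-linked hook, like cpu_feature_init() */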
	if (&cpu_mp_init)
		cpu_mp_init();
}
794