/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 302372 2016-07-06 14:09:49Z nwhitehorn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/setjmp.h>
#include <machine/smp.h>

#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

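/*
 * State shared between the BSP and the APs during bring-up: ap_awake
 * counts the CPUs that have announced themselves, ap_letgo is the flag
 * the BSP raises to release the spinning APs, and ap_timebase is the
 * timebase value the APs load so they stay in sync with the BSP.
 */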
volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

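/*
 * AP bootstrap: record the PIR, mark this CPU awake, spin until the
 * BSP releases the APs, sync the timebase and decrementer with the
 * BSP, run the platform hook, report in, start the per-CPU event
 * timers and finally enter the scheduler.
 */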
void
machdep_ap_bootstrap(void)
{

	/* Set PIR */
	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

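	/* Spin until cpu_mp_unleash() on the BSP sets ap_letgo. */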
	while (ap_letgo == 0)
		;

	/* Initialize DEC and TB, sync with the BSP values */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif
	decr_ap_init();

	/* Give platform code a chance to do anything necessary */
	platform_smp_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}

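/*
 * Walk the CPU list supplied by the platform code to count the CPUs
 * (mp_ncpus) and record the highest CPU ID seen (mp_maxid).
 */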
void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	mp_maxid = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		mp_maxid = max(cpuref.cr_cpuid, mp_maxid);
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}

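/*
 * Prepare every usable CPU for SMP startup: allocate and initialize
 * pcpu and dynamic per-CPU (dpcpu) storage for each AP, record the
 * platform hardware reference and add the CPU to all_cpus.
 */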
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
			    M_WAITOK | M_ZERO);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_hwref = cpu.cr_hwref;
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}
}

void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}

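/*
 * BSP-side SMP startup, run at SI_SUB_SMP: wake each AP through the
 * platform code, wait for it to mark itself awake, publish a timebase
 * value and then set ap_letgo so the spinning APs can finish
 * machdep_ap_bootstrap().
 */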
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
#ifdef BOOKE
	tlb1_ap_prep();
#endif
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			platform_smp_start_cpu(pc);

			timeout = 2000;	/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);

		} else {
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* Let the APs get into the scheduler */
	DELAY(10000);

	/* XXX Atomic set operation? */
	smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

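/*
 * IPI interrupt filter: atomically read and clear this CPU's pending
 * IPI bitmask and dispatch each message that was posted.
 */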
int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;
		switch (msg) {
		case IPI_AST:
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP, so there is
			 * no need for a separate case in this switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			cpuid = PCPU_GET(cpuid);
			savectx(&stoppcbs[cpuid]);
			savectx(PCPU_GET(curpcb));
			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
			while (!CPU_ISSET(cpuid, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
			CPU_CLR_ATOMIC(cpuid, &started_cpus);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		}
	}

	return (FILTER_HANDLED);
}

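/*
 * Post a single IPI message: set the message bit in the target CPU's
 * pc_ipimask and have the root PIC interrupt that CPU.
 */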
static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	powerpc_sync();
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}
