mp_machdep.c revision 194784
1104476Ssam/*-
2104476Ssam * Copyright (c) 2008 Marcel Moolenaar
3104476Ssam * All rights reserved.
4139825Simp *
5104476Ssam * Redistribution and use in source and binary forms, with or without
6104476Ssam * modification, are permitted provided that the following conditions
7104476Ssam * are met:
8104476Ssam *
9104476Ssam * 1. Redistributions of source code must retain the above copyright
10104476Ssam *    notice, this list of conditions and the following disclaimer.
11104476Ssam * 2. Redistributions in binary form must reproduce the above copyright
12104476Ssam *    notice, this list of conditions and the following disclaimer in the
13104476Ssam *    documentation and/or other materials provided with the distribution.
14104476Ssam *
15104476Ssam * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16104476Ssam * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17104476Ssam * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18104476Ssam * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19104476Ssam * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20104476Ssam * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21104476Ssam * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22104476Ssam * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23104476Ssam * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24104476Ssam * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25104476Ssam */
26104476Ssam
27104476Ssam#include <sys/cdefs.h>
28104476Ssam__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 194784 2009-06-23 22:42:39Z jeff $");
29104476Ssam
30104476Ssam#include <sys/param.h>
31104476Ssam#include <sys/systm.h>
32104476Ssam#include <sys/kernel.h>
33104476Ssam#include <sys/ktr.h>
34104476Ssam#include <sys/bus.h>
35104476Ssam#include <sys/pcpu.h>
36104476Ssam#include <sys/proc.h>
37104476Ssam#include <sys/sched.h>
38104476Ssam#include <sys/smp.h>
39158703Spjd
40104476Ssam#include <machine/bus.h>
41104476Ssam#include <machine/cpu.h>
42104476Ssam#include <machine/intr_machdep.h>
43104476Ssam#include <machine/platform.h>
44104476Ssam#include <machine/md_var.h>
45104476Ssam#include <machine/smp.h>
46104476Ssam
47104476Ssam#include "pic_if.h"
48104476Ssam
49104476Ssamextern struct pcpu __pcpu[MAXCPU];
50104476Ssam
51104476Ssamvolatile static int ap_awake;
52104476Ssamvolatile static u_int ap_letgo;
53104476Ssamvolatile static uint32_t ap_decr;
54104476Ssamvolatile static u_quad_t ap_timebase;
55104476Ssamstatic u_int ipi_msg_cnt[32];
56104476Ssam
57104476Ssamvoid
58104476Ssammachdep_ap_bootstrap(void)
59104476Ssam{
60104476Ssam
61104476Ssam	PCPU_SET(pir, mfspr(SPR_PIR));
62104476Ssam	PCPU_SET(awake, 1);
63104476Ssam	__asm __volatile("msync; isync");
64104476Ssam
65104476Ssam	while (ap_letgo == 0)
66104476Ssam		;
67104476Ssam
68104476Ssam	/* Initialize DEC and TB, sync with the BSP values */
69104476Ssam	decr_ap_init();
70104476Ssam	mttb(ap_timebase);
71104476Ssam	__asm __volatile("mtdec %0" :: "r"(ap_decr));
72104476Ssam
73104476Ssam	atomic_add_int(&ap_awake, 1);
74104476Ssam	CTR1(KTR_SMP, "SMP: AP CPU%d launched", PCPU_GET(cpuid));
75104476Ssam
76104476Ssam	/* Initialize curthread */
77104476Ssam	PCPU_SET(curthread, PCPU_GET(idlethread));
78104476Ssam	PCPU_SET(curpcb, curthread->td_pcb);
79104476Ssam
80104476Ssam	/* Let the DEC and external interrupts go */
81104476Ssam	mtmsr(mfmsr() | PSL_EE);
82104476Ssam	sched_throw(NULL);
83104476Ssam}
84104476Ssam
85104476Ssamstruct cpu_group *
86104476Ssamcpu_topo(void)
87104476Ssam{
88158703Spjd
89158703Spjd	return (smp_topo_none());
90158703Spjd}
91104476Ssam
92104476Ssamvoid
93104476Ssamcpu_mp_setmaxid(void)
94104476Ssam{
95104476Ssam	struct cpuref cpuref;
96104476Ssam	int error;
97104476Ssam
98104476Ssam	mp_ncpus = 0;
99104476Ssam	error = platform_smp_first_cpu(&cpuref);
100104476Ssam	while (!error) {
101104476Ssam		mp_ncpus++;
102		error = platform_smp_next_cpu(&cpuref);
103	}
104	/* Sanity. */
105	if (mp_ncpus == 0)
106		mp_ncpus = 1;
107
108	/*
109	 * Set the largest cpuid we're going to use. This is necessary
110	 * for VM initialization.
111	 */
112	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
113}
114
/*
 * Report whether SMP should be enabled (non-zero if so).
 */
int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}
124
/*
 * Enumerate all CPUs, validate each one, and set up its pcpu and
 * dynamic per-CPU (dpcpu) areas.  CPUs with out-of-range or duplicate
 * IDs are skipped with a diagnostic.  The BSP's pcpu already exists
 * (pcpup); only its identity fields are filled in here.
 */
void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	/* This port assumes the boot processor is always cpuid 0. */
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		/* Reject IDs beyond the statically sized __pcpu[] array. */
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		/* Reject IDs the platform reported more than once. */
		if (all_cpus & (1 << cpu.cr_cpuid)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			/* AP: initialize its pcpu and allocate its dpcpu area. */
			pc = &__pcpu[cpu.cr_cpuid];
			/*
			 * NOTE(review): kmem_alloc() result is not checked;
			 * dpcpu_init() would dereference a failed allocation --
			 * confirm allocation cannot fail at this point in boot.
			 */
			dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			/* BSP: pcpu already set up; just record identity. */
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_cpumask = 1 << pc->pc_cpuid;
		pc->pc_hwref = cpu.cr_hwref;	/* platform's hardware handle */
		all_cpus |= pc->pc_cpumask;
next:
		error = platform_smp_next_cpu(&cpu);
	}
}
167
168void
169cpu_mp_announce(void)
170{
171	struct pcpu *pc;
172	int i;
173
174	for (i = 0; i <= mp_maxid; i++) {
175		pc = pcpu_find(i);
176		if (pc == NULL)
177			continue;
178		printf("cpu%d: dev=%x", i, pc->pc_hwref);
179		if (pc->pc_bsp)
180			printf(" (BSP)");
181		printf("\n");
182	}
183}
184
/*
 * Wake up every AP, wait for each to report awake, then publish the
 * BSP's decrementer/timebase values and release the APs (which are
 * spinning in machdep_ap_bootstrap()).  Runs once from SYSINIT at
 * SI_SUB_SMP.  The publish/release ordering matters: values are
 * stored and made visible (msync; isync) before ap_letgo is set with
 * release semantics.
 */
static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	/* Nothing to do on a uniprocessor configuration. */
	if (mp_ncpus <= 1)
		return;

	cpus = 0;
	smp_cpus = 0;
	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, pc->pc_hwref);

			/* Kick the AP; it spins in machdep_ap_bootstrap(). */
			platform_smp_start_cpu(pc);

			timeout = 2000;	/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);

		} else {
			/* The BSP marks itself awake directly. */
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			/* AP never came up; record it as stopped. */
			stopped_cpus |= (1 << pc->pc_cpuid);
	}

	ap_awake = 1;	/* count ourselves (the BSP) */

	/* Provide our current DEC and TB values for APs */
	__asm __volatile("mfdec %0" : "=r"(ap_decr));
	ap_timebase = mftb() + 10;	/* small skew so APs start slightly ahead */
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

	/* Reload our own timebase so BSP and APs agree. */
	mttb(ap_timebase);

	/* Spin until every usable AP has bumped ap_awake. */
	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	smp_active = 1;
	smp_started = 1;
}
246
247SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
248
249int
250powerpc_ipi_handler(void *arg)
251{
252	cpumask_t self;
253	uint32_t ipimask;
254	int msg;
255
256	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());
257
258	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
259	if (ipimask == 0)
260		return (FILTER_STRAY);
261	while ((msg = ffs(ipimask) - 1) != -1) {
262		ipimask &= ~(1u << msg);
263		ipi_msg_cnt[msg]++;
264		switch (msg) {
265		case IPI_AST:
266			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
267			break;
268		case IPI_PREEMPT:
269			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
270			sched_preempt(curthread);
271			break;
272		case IPI_RENDEZVOUS:
273			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
274			smp_rendezvous_action();
275			break;
276		case IPI_STOP:
277			CTR1(KTR_SMP, "%s: IPI_STOP (stop)", __func__);
278			self = PCPU_GET(cpumask);
279			savectx(PCPU_GET(curpcb));
280			atomic_set_int(&stopped_cpus, self);
281			while ((started_cpus & self) == 0)
282				cpu_spinwait();
283			atomic_clear_int(&started_cpus, self);
284			atomic_clear_int(&stopped_cpus, self);
285			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
286			break;
287		}
288	}
289
290	return (FILTER_HANDLED);
291}
292
293static void
294ipi_send(struct pcpu *pc, int ipi)
295{
296
297	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
298	    pc, pc->pc_cpuid, ipi);
299
300	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
301	PIC_IPI(pic, pc->pc_cpuid);
302
303	CTR1(KTR_SMP, "%s: sent", __func__);
304}
305
306/* Send an IPI to a set of cpus. */
307void
308ipi_selected(cpumask_t cpus, int ipi)
309{
310	struct pcpu *pc;
311
312	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
313		if (cpus & pc->pc_cpumask)
314			ipi_send(pc, ipi);
315	}
316}
317
318/* Send an IPI to all CPUs EXCEPT myself. */
319void
320ipi_all_but_self(int ipi)
321{
322	struct pcpu *pc;
323
324	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
325		if (pc != pcpup)
326			ipi_send(pc, ipi);
327	}
328}
329