/* mp_machdep.c revision 331722 */
/*-
 * Copyright (c) 2009 Neelkanth Natu
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: stable/11/sys/mips/mips/mp_machdep.c 331722 2018-03-29 02:50:57Z eadler $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/cpuset.h>
33#include <sys/ktr.h>
34#include <sys/proc.h>
35#include <sys/lock.h>
36#include <sys/malloc.h>
37#include <sys/mutex.h>
38#include <sys/kernel.h>
39#include <sys/pcpu.h>
40#include <sys/smp.h>
41#include <sys/sched.h>
42#include <sys/bus.h>
43
44#include <vm/vm.h>
45#include <vm/pmap.h>
46#include <vm/vm_extern.h>
47#include <vm/vm_kern.h>
48
49#include <machine/clock.h>
50#include <machine/smp.h>
51#include <machine/hwfunc.h>
52#include <machine/intr_machdep.h>
53#include <machine/cache.h>
54#include <machine/tlb.h>
55
/* Saved CPU context for each processor parked by IPI_STOP (see savectx()). */
struct pcb stoppcbs[MAXCPU];

/* Dynamic per-CPU data area allocated for the next AP; consumed in
 * smp_init_secondary() via dpcpu_init(). */
static void *dpcpu;
/* Spin mutex serializing AP boot announcements in smp_init_secondary(). */
static struct mtx ap_boot_mtx;

/* Set to 1 by the BSP in release_aps(); APs spin on this before proceeding. */
static volatile int aps_ready;
/* Number of APs that have checked in; incremented by each AP, polled by
 * start_ap() on the BSP. */
static volatile int mp_naps;
63
64static void
65ipi_send(struct pcpu *pc, int ipi)
66{
67
68	CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi);
69
70	atomic_set_32(&pc->pc_pending_ipis, ipi);
71	platform_ipi_send(pc->pc_cpuid);
72
73	CTR1(KTR_SMP, "%s: sent", __func__);
74}
75
76void
77ipi_all_but_self(int ipi)
78{
79	cpuset_t other_cpus;
80
81	other_cpus = all_cpus;
82	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
83	ipi_selected(other_cpus, ipi);
84}
85
86/* Send an IPI to a set of cpus. */
87void
88ipi_selected(cpuset_t cpus, int ipi)
89{
90	struct pcpu *pc;
91
92	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
93		if (CPU_ISSET(pc->pc_cpuid, &cpus)) {
94			CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc,
95			    ipi);
96			ipi_send(pc, ipi);
97		}
98	}
99}
100
101/* Send an IPI to a specific CPU. */
102void
103ipi_cpu(int cpu, u_int ipi)
104{
105
106	CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi);
107	ipi_send(cpuid_to_pcpu[cpu], ipi);
108}
109
/*
 * Handle an IPI sent to this processor.
 *
 * Installed as an interrupt filter on the platform IPI line by
 * release_aps().  Returns FILTER_STRAY when no IPI bits were pending,
 * FILTER_HANDLED otherwise.
 */
static int
mips_ipi_handler(void *arg)
{
	u_int	cpu, ipi, ipi_bitmap;
	int	bit;

	cpu = PCPU_GET(cpuid);

	platform_ipi_clear();	/* quiesce the pending ipi interrupt */

	/*
	 * Atomically snapshot and clear our pending-IPI word.  Senders set
	 * bits here in ipi_send() before raising the hardware interrupt, so
	 * several IPI types may be batched into one invocation.
	 */
	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	if (ipi_bitmap == 0)
		return (FILTER_STRAY);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);

	/* Service each pending IPI type, lowest bit first. */
	while ((bit = ffs(ipi_bitmap))) {
		bit = bit - 1;	/* ffs() is 1-based */
		ipi = 1 << bit;
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			/* No action needed here beyond taking the interrupt. */
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			/* Save our context so a debugger can inspect it. */
			savectx(&stoppcbs[cpu]);
			tlb_save();

			/* Indicate we are stopped */
			CPU_SET_ATOMIC(cpu, &stopped_cpus);

			/* Wait for restart */
			while (!CPU_ISSET(cpu, &started_cpus))
				cpu_spinwait();

			/*
			 * Clear started first, then stopped, so the
			 * restarting CPU sees us leave the stopped state
			 * only after we have consumed the start request.
			 */
			CPU_CLR_ATOMIC(cpu, &started_cpus);
			CPU_CLR_ATOMIC(cpu, &stopped_cpus);
			CTR0(KTR_SMP, "IPI_STOP (restart)");
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		default:
			panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
		}
	}

	return (FILTER_HANDLED);
}
179
/*
 * Start one application processor.
 *
 * Allocates a fresh dpcpu area (handed to the AP via the file-scope
 * 'dpcpu' pointer, consumed in smp_init_secondary()), asks the platform
 * to start the CPU, and polls up to ~5 seconds for the AP to check in by
 * incrementing mp_naps.
 *
 * Returns 0 on success, -1 if the platform could not start the CPU,
 * -2 if the AP failed to check in before the timeout.
 */
static int
start_ap(int cpuid)
{
	int cpus, ms;

	cpus = mp_naps;		/* check-in count before this AP starts */
	dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO);

	/* Make the dpcpu pointer and contents visible to the AP. */
	mips_sync();

	if (platform_start_ap(cpuid) != 0)
		return (-1);			/* could not start AP */

	/* Poll once per millisecond for the AP to bump mp_naps. */
	for (ms = 0; ms < 5000; ++ms) {
		if (mp_naps > cpus)
			return (0);		/* success */
		else
			DELAY(1000);
	}

	return (-2);				/* timeout initializing AP */
}
202
/*
 * Count the CPUs reported by the platform and derive mp_ncpus and
 * mp_maxid.  CPU_FFS() returns a 1-based bit index, so after the loop
 * 'last' holds (highest CPU id + 1); mp_maxid is that minus one,
 * clamped by the compile-time MAXCPU limit.
 */
void
cpu_mp_setmaxid(void)
{
	cpuset_t cpumask;
	int cpu, last;

	platform_cpu_mask(&cpumask);
	mp_ncpus = 0;
	last = 1;
	while ((cpu = CPU_FFS(&cpumask)) != 0) {
		last = cpu;	/* 1-based index of this CPU */
		cpu--;
		CPU_CLR(cpu, &cpumask);
		mp_ncpus++;
	}
	if (mp_ncpus <= 0)
		mp_ncpus = 1;	/* always at least the BSP */

	mp_maxid = min(last, MAXCPU) - 1;
}
223
/*
 * Nothing machine-dependent to announce beyond the generic SMP report.
 */
void
cpu_mp_announce(void)
{

	/* NOTHING */
}
229
/* Defer CPU topology construction to the platform code. */
struct cpu_group *
cpu_topo(void)
{

	return (platform_smp_topo());
}
235
236int
237cpu_mp_probe(void)
238{
239
240	return (mp_ncpus > 1);
241}
242
/*
 * Start all application processors enumerated by the platform.
 *
 * Runs on the BSP during boot.  CPU ids at or beyond MAXCPU are
 * ignored with a console message; the BSP itself is only added to
 * all_cpus.  An AP that fails to start is reported and skipped.
 */
void
cpu_mp_start(void)
{
	int error, cpuid;
	cpuset_t cpumask;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	CPU_ZERO(&all_cpus);
	platform_cpu_mask(&cpumask);

	while (!CPU_EMPTY(&cpumask)) {
		cpuid = CPU_FFS(&cpumask) - 1;	/* CPU_FFS() is 1-based */
		CPU_CLR(cpuid, &cpumask);

		if (cpuid >= MAXCPU) {
			printf("cpu_mp_start: ignoring AP #%d.\n", cpuid);
			continue;
		}

		if (cpuid != platform_processor_id()) {
			if ((error = start_ap(cpuid)) != 0) {
				printf("AP #%d failed to start: %d\n", cpuid, error);
				continue;
			}
			if (bootverbose)
				printf("AP #%d started!\n", cpuid);
		}
		CPU_SET(cpuid, &all_cpus);
	}
}
274
/*
 * Per-AP initialization, executed on the AP itself as it comes up.
 *
 * Initializes the TLB and caches, sets up this CPU's pcpu/dpcpu data,
 * checks in with the BSP (mp_naps), then spins until the BSP releases
 * all APs (aps_ready) and finally enters the scheduler.  Never returns.
 */
void
smp_init_secondary(u_int32_t cpuid)
{

	/* TLB */
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);

	/*
	 * We assume that the L1 cache on the APs is identical to the one
	 * on the BSP.
	 */
	mips_dcache_wbinv_all();
	mips_icache_sync_all();

	mips_sync();

	mips_wr_entryhi(0);

	/* Per-CPU data; 'dpcpu' was allocated for us by start_ap(). */
	pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu));
	dpcpu_init(dpcpu, cpuid);

	/* The AP has initialized successfully - allow the BSP to proceed */
	++mp_naps;

	/* Spin until the BSP is ready to release the APs */
	while (!aps_ready)
		;

	/* Initialize curthread. */
	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
	PCPU_SET(curthread, PCPU_GET(idlethread));

	/* ap_boot_mtx serializes the smp_cpus update and announcements. */
	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid));

	if (bootverbose)
		printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid));

	/* The last AP to arrive declares SMP fully started. */
	if (smp_cpus == mp_ncpus) {
		atomic_store_rel_int(&smp_started, 1);
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until every AP has arrived before proceeding. */
	while (smp_started == 0)
		; /* nothing */

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}
336
/*
 * SYSINIT hook run on the BSP at SI_SUB_SMP: install the IPI interrupt
 * handler, release the APs spinning on aps_ready in smp_init_secondary(),
 * and wait for SMP startup to complete.  A no-op on uniprocessor systems.
 */
static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	if (mp_ncpus == 1)
		return;

	/*
	 * IPI handler
	 */
	ipi_irq = platform_ipi_intrnum();
	cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL, ipi_irq,
			       INTR_TYPE_MISC | INTR_EXCL, NULL);

	/* Release-store so the APs see all prior initialization. */
	atomic_store_rel_int(&aps_ready, 1);

	/* Block until the last AP sets smp_started. */
	while (smp_started == 0)
		; /* nothing */
}
357
/* Release the APs once boot reaches the SMP stage. */
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
359