/*-
 * Copyright (c) 2014 Ganbold Tsagaankhuu <ganbold@freebsd.org>
 * Copyright (c) 2016 Emmanuel Vadot <manu@bidouilliste.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/aw_mp.c 299380 2016-05-10 18:00:37Z manu $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/cpu-v6.h>
#include <machine/smp.h>
#include <machine/fdt.h>
#include <machine/intr.h>
#include <machine/platformvar.h>

#include <arm/allwinner/aw_mp.h>
#include <arm/allwinner/allwinner_machdep.h>

/* Registers for all dual-core SoCs */
#define	A20_CPUCFG_BASE		0x01c25c00
/* Registers for all quad-core SoCs */
#define	CPUCFG_BASE		0x01f01c00
#define	CPUCFG_SIZE		0x400
#define	PRCM_BASE		0x01f01400
#define	PRCM_SIZE		0x800
/* Registers for multi-cluster SoCs */
#define	CPUXCFG_BASE		0x01700000
#define	CPUXCFG_SIZE		0x400

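/*
 * Each CPU has a 0x40-byte register window in CPUCFG, starting at
 * offset 0x40 for CPU0: reset control at +0x00, the control register
 * at +0x04 and the status register at +0x08.
 */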
#define	CPU_OFFSET		0x40
#define	CPU_OFFSET_CTL		0x04
#define	CPU_OFFSET_STATUS	0x08
#define	CPU_RST_CTL(cpuid)	(((cpuid) + 1) * CPU_OFFSET)
#define	CPU_CTL(cpuid)		((((cpuid) + 1) * CPU_OFFSET) + CPU_OFFSET_CTL)
#define	CPU_STATUS(cpuid)	((((cpuid) + 1) * CPU_OFFSET) + CPU_OFFSET_STATUS)

#define	CPU_RESET		(1 << 0)
#define	CPU_CORE_RESET		(1 << 1)

#define	CPUCFG_GENCTL		0x184
#define	CPUCFG_P_REG0		0x1a4

#define	A20_CPU1_PWR_CLAMP	0x1b0
#define	CPU_PWR_CLAMP_REG	0x140
#define	CPU_PWR_CLAMP(cpu)	(((cpu) * 4) + CPU_PWR_CLAMP_REG)
#define	CPU_PWR_CLAMP_STEPS	8

#define	A20_CPU1_PWROFF_REG	0x1b4
#define	CPU_PWROFF		0x100

#define	CPUCFG_DBGCTL0		0x1e0
#define	CPUCFG_DBGCTL1		0x1e4

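/*
 * On multi-cluster SoCs (a83t) the per-CPU controls are spread over
 * three blocks: power-on reset in CPUCFG (CPUS_CL_RST), core reset and
 * control in CPUXCFG (CPUX_CL_*), and power clamp/gating in the PRCM
 * (PRCM_CL_*).
 */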
#define	CPUS_CL_RST(cl)		(0x30 + (cl) * 0x4)
#define	CPUX_CL_CTRL0(cl)	(0x0 + (cl) * 0x10)
#define	CPUX_CL_CTRL1(cl)	(0x4 + (cl) * 0x10)
#define	CPUX_CL_CPU_STATUS(cl)	(0x30 + (cl) * 0x4)
#define	CPUX_CL_RST(cl)		(0x80 + (cl) * 0x4)
#define	PRCM_CL_PWROFF(cl)	(0x100 + (cl) * 0x4)
#define	PRCM_CL_PWR_CLAMP(cl, cpu)	(0x140 + (cl) * 0x4 + (cpu) * 0x4)

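/*
 * Derive the core count from the L2 control register's processor count
 * field so that mp_ncpus and mp_maxid are known before the APs are
 * started.
 */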
void
aw_mp_setmaxid(platform_t plat)
{
	int ncpu;
	uint32_t reg;

	if (mp_ncpus != 0)
		return;

	reg = cp15_l2ctlr_get();
	ncpu = CPUV7_L2CTLR_NPROC(reg);

	mp_ncpus = ncpu;
	mp_maxid = ncpu - 1;
}

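/*
 * Start the application processors on single-cluster SoCs.  On the A20
 * (sun7i) the power clamp and power gate controls live in CPUCFG itself;
 * the quad-core SoCs keep them in the separate PRCM block.
 */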
void
aw_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpucfg;
	bus_space_handle_t prcm;
	int i, j, soc_family;
	uint32_t val;

	soc_family = allwinner_soc_family();
	if (soc_family == ALLWINNERSOC_SUN7I) {
		if (bus_space_map(fdtbus_bs_tag, A20_CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
	} else {
		if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
		    0, &cpucfg) != 0)
			panic("Couldn't map the CPUCFG\n");
		if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
		    &prcm) != 0)
			panic("Couldn't map the PRCM\n");
	}

	dcache_wbinv_poc_all();

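	/*
	 * Publish the physical address of the secondary entry point so
	 * the cores branch to mpentry once they come out of reset.
	 */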
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	/*
	 * Assert nCOREPORESET low and set L1RSTDISABLE low.
	 * Ensure DBGPWRDUP is set to LOW to prevent any external
	 * debug access to the processor.
	 */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i), 0);

	/* Set L1RSTDISABLE low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL, val);

	/* Set DBGPWRDUP low */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

	/* Release power clamp, stepping the clamp bits off one at a time */
	for (i = 1; i < mp_ncpus; i++)
		for (j = 0; j <= CPU_PWR_CLAMP_STEPS; j++) {
			if (soc_family != ALLWINNERSOC_SUN7I) {
				bus_space_write_4(fdtbus_bs_tag, prcm,
				    CPU_PWR_CLAMP(i), 0xff >> j);
			} else {
				bus_space_write_4(fdtbus_bs_tag,
				    cpucfg, A20_CPU1_PWR_CLAMP, 0xff >> j);
			}
		}
	DELAY(10000);

	/* Clear power-off gating */
	if (soc_family != ALLWINNERSOC_SUN7I) {
		val = bus_space_read_4(fdtbus_bs_tag, prcm, CPU_PWROFF);
		for (i = 0; i < mp_ncpus; i++)
			val &= ~(1 << i);
		bus_space_write_4(fdtbus_bs_tag, prcm, CPU_PWROFF, val);
	} else {
		val = bus_space_read_4(fdtbus_bs_tag,
		    cpucfg, A20_CPU1_PWROFF_REG);
		val &= ~(1 << 0);
		bus_space_write_4(fdtbus_bs_tag, cpucfg,
		    A20_CPU1_PWROFF_REG, val);
	}
	DELAY(1000);

	/* De-assert cpu core reset */
	for (i = 1; i < mp_ncpus; i++)
		bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU_RST_CTL(i),
		    CPU_RESET | CPU_CORE_RESET);

	/* Assert DBGPWRDUP signal */
	val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1);
	for (i = 1; i < mp_ncpus; i++)
		val |= (1 << i);
	bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val);

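	/* Wake any cores that are waiting in WFE */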
	armv7_sev();
	bus_space_unmap(fdtbus_bs_tag, cpucfg, CPUCFG_SIZE);
	if (soc_family != ALLWINNERSOC_SUN7I)
		bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}

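/*
 * Power up and release a single secondary core in the given cluster on
 * multi-cluster SoCs.
 */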
static void
aw_mc_mp_start_cpu(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm, int cluster, int cpu)
{
	uint32_t val;
	int i;

	/* Assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);

	/* Assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* Disable automatic L1 cache invalidate at reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_CTRL0(cluster), val);

	/* Release power clamp */
	for (i = 0; i <= CPU_PWR_CLAMP_STEPS; i++)
		bus_space_write_4(fdtbus_bs_tag, prcm,
		    PRCM_CL_PWR_CLAMP(cluster, cpu), 0xff >> i);
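	/* Wait for the clamp to read back as fully released */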
	while (bus_space_read_4(fdtbus_bs_tag, prcm,
	    PRCM_CL_PWR_CLAMP(cluster, cpu)) != 0)
		;

	/* Clear power-off gating */
	val = bus_space_read_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster));
	val &= ~(1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, prcm, PRCM_CL_PWROFF(cluster), val);

	/* De-assert power-on reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUS_CL_RST(cluster), val);

	/* De-assert core reset */
	val = bus_space_read_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster));
	val |= (1 << cpu);
	bus_space_write_4(fdtbus_bs_tag, cpuxcfg, CPUX_CL_RST(cluster), val);
}

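/*
 * Common multi-cluster AP startup: publish the entry point and bring up
 * each secondary core.  Only cluster 0 is handled for now, as the
 * KASSERT below notes.
 */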
static void
aw_mc_mp_start_ap(bus_space_handle_t cpuscfg, bus_space_handle_t cpuxcfg,
    bus_space_handle_t prcm)
{
	int cluster, cpu;

	KASSERT(mp_ncpus <= 4, ("multiple clusters not yet supported"));

	dcache_wbinv_poc_all();

	bus_space_write_4(fdtbus_bs_tag, cpuscfg, CPUCFG_P_REG0,
	    pmap_kextract((vm_offset_t)mpentry));

	cluster = 0;
	for (cpu = 1; cpu < mp_ncpus; cpu++)
		aw_mc_mp_start_cpu(cpuscfg, cpuxcfg, prcm, cluster, cpu);
}

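/*
 * Platform mp_start_ap method for the multi-cluster a83t: map CPUCFG,
 * CPUXCFG and the PRCM, start the secondary cores and wake them with
 * an SEV.
 */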
void
a83t_mp_start_ap(platform_t plat)
{
	bus_space_handle_t cpuscfg, cpuxcfg, prcm;

	if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE,
	    0, &cpuscfg) != 0)
		panic("Couldn't map the CPUCFG\n");
	if (bus_space_map(fdtbus_bs_tag, CPUXCFG_BASE, CPUXCFG_SIZE,
	    0, &cpuxcfg) != 0)
		panic("Couldn't map the CPUXCFG\n");
	if (bus_space_map(fdtbus_bs_tag, PRCM_BASE, PRCM_SIZE, 0,
	    &prcm) != 0)
		panic("Couldn't map the PRCM\n");

	aw_mc_mp_start_ap(cpuscfg, cpuxcfg, prcm);
	armv7_sev();
	bus_space_unmap(fdtbus_bs_tag, cpuxcfg, CPUXCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, cpuscfg, CPUCFG_SIZE);
	bus_space_unmap(fdtbus_bs_tag, prcm, PRCM_SIZE);
}