/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/hid.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/spr.h>
#include <machine/trap.h>

#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>

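/*
 * Per-CPU data pointer for the next AP to be started; the platform SMP
 * startup code is expected to set this before releasing the AP, and
 * cpudep_ap_early_bootstrap() installs it in SPRG0.
 */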
void *ap_pcpu;

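/*
 * Snapshot of the boot processor's HID and cache-control registers, taken
 * by cpudep_save_config() at boot time and replayed on each AP; the layout
 * of the array depends on the CPU family.
 */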
static register_t bsp_state[8] __aligned(8);

static void cpudep_save_config(void *dummy);
SYSINIT(cpu_save_config, SI_SUB_CPU, SI_ORDER_ANY, cpudep_save_config, NULL);

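/*
 * First-stage AP bootstrap, run before the AP has a full kernel
 * environment: restore the MMU-critical HID registers on 970-class CPUs,
 * redirect interrupts from HSRR to SRR on bare-metal POWER8/POWER9, and
 * install the per-CPU pointer in SPRG0.
 */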
void
cpudep_ap_early_bootstrap(void)
{
#ifndef __powerpc64__
	register_t reg;
#endif

	switch (mfpvr() >> 16) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/* Set HIOR to 0 */
		__asm __volatile("mtspr 311,%0" :: "r"(0));
		powerpc_sync();

		/* Restore HID4 and HID5, which are necessary for the MMU */

#ifdef __powerpc64__
		mtspr(SPR_HID4, bsp_state[2]); powerpc_sync(); isync();
		mtspr(SPR_HID5, bsp_state[3]); powerpc_sync(); isync();
#else
		/*
		 * Even under a 32-bit kernel the 970 is a 64-bit CPU, so a
		 * 64-bit "ld" can reload each HID from the two 32-bit words
		 * saved by cpudep_save_config().
		 */
		__asm __volatile("ld %0, 16(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID4), "b"(bsp_state));
		__asm __volatile("ld %0, 24(%2); sync; isync;	\
		    mtspr %1, %0; sync; isync;"
		    : "=r"(reg) : "K"(SPR_HID5), "b"(bsp_state));
#endif
		powerpc_sync();
		break;
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			isync();
			/*
			 * Clear LPID to direct interrupts to SRR instead
			 * of HSRR, then reset LPCR to the kernel default.
			 */
			mtspr(SPR_LPID, 0);
			isync();

			mtspr(SPR_LPCR, lpcr);
			isync();

			/*
			 * Nuke FSCR, to be managed on a per-process basis
			 * later.
			 */
			mtspr(SPR_FSCR, 0);
		}
#endif
		break;
	}

	/* Install this AP's per-CPU pointer in SPRG0. */
	__asm __volatile("mtsprg 0, %0" :: "r"(ap_pcpu));
	powerpc_sync();
}

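/*
 * Second-stage AP bootstrap: with external interrupts masked, make the
 * AP's idle thread curthread and return the idle thread's stack pointer
 * for the low-level startup code to switch onto.
 */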
uintptr_t
cpudep_ap_bootstrap(void)
{
	register_t msr, sp;

	msr = psl_kernset & ~PSL_EE;
	mtmsr(msr);

	pcpup->pc_curthread = pcpup->pc_idlethread;
	/* curthread lives in r13 on 64-bit kernels and in r2 on 32-bit. */
#ifdef __powerpc64__
	__asm __volatile("mr 13,%0" :: "r"(pcpup->pc_curthread));
#else
	__asm __volatile("mr 2,%0" :: "r"(pcpup->pc_curthread));
#endif
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	return (sp);
}

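/*
 * Enable the L2 cache if it is not on already: write the desired
 * configuration with the enable bit clear, request a global invalidate,
 * spin until the invalidate completes (the completion bit differs between
 * the 7400/7410 and the 745x), then write the final configuration.
 */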
static register_t
mpc74xx_l2_enable(register_t l2cr_config)
{
	register_t ccr, bit;
	uint16_t	vers;

	vers = mfpvr() >> 16;
	switch (vers) {
	case MPC7400:
	case MPC7410:
		bit = L2CR_L2IP;
		break;
	default:
		bit = L2CR_L2I;
		break;
	}

	ccr = mfspr(SPR_L2CR);
	if (ccr & L2CR_L2E)
		return (ccr);

	/* Configure L2 cache. */
	ccr = l2cr_config & ~L2CR_L2E;
	mtspr(SPR_L2CR, ccr | L2CR_L2I);
	do {
		ccr = mfspr(SPR_L2CR);
	} while (ccr & bit);
	powerpc_sync();
	mtspr(SPR_L2CR, l2cr_config);
	powerpc_sync();

	return (l2cr_config);
}

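/*
 * Enable the L3 cache on MPC745x CPUs, following the documented bring-up
 * sequence: program the configuration with the cache disabled, toggle the
 * clock enable with settling delays, perform a global invalidate, and only
 * then set the enable bit.
 */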
static register_t
mpc745x_l3_enable(register_t l3cr_config)
{
	register_t ccr;

	ccr = mfspr(SPR_L3CR);
	if (ccr & L3CR_L3E)
		return (ccr);

	/* Configure L3 cache. */
	ccr = l3cr_config & ~(L3CR_L3E | L3CR_L3I | L3CR_L3PE | L3CR_L3CLKEN);
	mtspr(SPR_L3CR, ccr);
	ccr |= 0x4000000;       /* Magic, but documented. */
	mtspr(SPR_L3CR, ccr);
	ccr |= L3CR_L3CLKEN;
	mtspr(SPR_L3CR, ccr);
	mtspr(SPR_L3CR, ccr | L3CR_L3I);
	while (mfspr(SPR_L3CR) & L3CR_L3I)
		;
	mtspr(SPR_L3CR, ccr & ~L3CR_L3CLKEN);
	powerpc_sync();
	DELAY(100);
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();
	DELAY(100);
	ccr |= L3CR_L3E;
	mtspr(SPR_L3CR, ccr);
	powerpc_sync();

	return (ccr);
}

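/*
 * Enable the L1 data cache, flash-invalidating its contents as part of
 * the same HID0 write.
 */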
static register_t
mpc74xx_l1d_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_DCE)
		return (hid);

	/* Enable L1 D-cache */
	hid |= HID0_DCE;
	powerpc_sync();
	mtspr(SPR_HID0, hid | HID0_DCFI);
	powerpc_sync();

	return (hid);
}

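/*
 * Enable the L1 instruction cache the same way, using isync rather than
 * sync to order the instruction-side change.
 */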
static register_t
mpc74xx_l1i_enable(void)
{
	register_t hid;

	hid = mfspr(SPR_HID0);
	if (hid & HID0_ICE)
		return (hid);

	/* Enable L1 I-cache */
	hid |= HID0_ICE;
	isync();
	mtspr(SPR_HID0, hid | HID0_ICFI);
	isync();

	return (hid);
}

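/*
 * Snapshot the boot processor's HID and cache-control registers early in
 * boot so that cpudep_ap_setup() can program identical values into each
 * AP. On 32-bit kernels the 970's 64-bit HIDs are saved as pairs of
 * 32-bit words.
 */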
static void
cpudep_save_config(void *dummy)
{
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		#ifdef __powerpc64__
		bsp_state[0] = mfspr(SPR_HID0);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[2] = mfspr(SPR_HID4);
		bsp_state[3] = mfspr(SPR_HID5);
		#else
		/* Save each 64-bit HID as its high and low 32-bit halves. */
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[0]),"=r" (bsp_state[1]) : "K" (SPR_HID0));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[2]),"=r" (bsp_state[3]) : "K" (SPR_HID1));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[4]),"=r" (bsp_state[5]) : "K" (SPR_HID4));
		__asm __volatile ("mfspr %0,%2; mr %1,%0; srdi %0,%0,32"
		    : "=r" (bsp_state[6]),"=r" (bsp_state[7]) : "K" (SPR_HID5));
		#endif

		powerpc_sync();

		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			bsp_state[0] = mfspr(SPR_HID0);
			bsp_state[1] = mfspr(SPR_HID1);
			bsp_state[2] = mfspr(SPR_HID4);
			bsp_state[3] = mfspr(SPR_HID6);

			bsp_state[4] = mfspr(SPR_CELL_TSCR);
		}
		#endif

		bsp_state[5] = mfspr(SPR_CELL_TSRL);

		break;
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* Only MPC745x CPUs have an L3 cache. */
		bsp_state[3] = mfspr(SPR_L3CR);

		/* FALLTHROUGH */
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
		bsp_state[2] = mfspr(SPR_L2CR);
		bsp_state[1] = mfspr(SPR_HID1);
		bsp_state[0] = mfspr(SPR_HID0);
		break;
	}
}

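/*
 * Final per-CPU setup on each AP: replay the configuration saved from the
 * BSP (HID registers, cache controls, or LPCR, depending on the CPU
 * family) so that all processors run with an identical setup.
 */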
void
cpudep_ap_setup(void)
{
	register_t	reg;
	uint16_t	vers;

	vers = mfpvr() >> 16;

	switch (vers) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		/*
		 * The 970 has strange rules about how to update HID registers.
		 * See Table 2-3, 970MP manual
		 *
		 * Note: HID4 and HID5 restored already in
		 * cpudep_ap_early_bootstrap()
		 */

		__asm __volatile("mtasr %0; sync" :: "r"(0));
	#ifdef __powerpc64__
		__asm __volatile(" \
			sync; isync;					\
			mtspr	%1, %0;					\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
			sync; isync"
		    :: "r"(bsp_state[0]), "K"(SPR_HID0));
		__asm __volatile("sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    :: "r"(bsp_state[1]), "K"(SPR_HID1));
	#else
		__asm __volatile(" \
			ld	%0,0(%2);				\
			sync; isync;					\
			mtspr	%1, %0;					\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1;	\
			mfspr	%0, %1;	mfspr	%0, %1;	mfspr	%0, %1; \
			sync; isync"
		    : "=r"(reg) : "K"(SPR_HID0), "b"(bsp_state));
		__asm __volatile("ld %0, 8(%2); sync; isync;	\
		    mtspr %1, %0; mtspr %1, %0; sync; isync"
		    : "=r"(reg) : "K"(SPR_HID1), "b"(bsp_state));
	#endif

		powerpc_sync();
		break;
	case IBMCELLBE:
		#ifdef NOTYET /* Causes problems if in instruction stream on 970 */
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_HID0, bsp_state[0]);
			mtspr(SPR_HID1, bsp_state[1]);
			mtspr(SPR_HID4, bsp_state[2]);
			mtspr(SPR_HID6, bsp_state[3]);

			mtspr(SPR_CELL_TSCR, bsp_state[4]);
		}
		#endif

		mtspr(SPR_CELL_TSRL, bsp_state[5]);

		break;
	case MPC7400:
	case MPC7410:
	case MPC7447A:
	case MPC7448:
	case MPC7450:
	case MPC7455:
	case MPC7457:
		/* XXX: Program the CPU ID into PIR */
		__asm __volatile("mtspr 1023,%0" :: "r"(PCPU_GET(cpuid)));

		powerpc_sync();
		isync();

		mtspr(SPR_HID0, bsp_state[0]); isync();
		mtspr(SPR_HID1, bsp_state[1]); isync();

		/* Now enable the L3 cache. */
		switch (vers) {
		case MPC7450:
		case MPC7455:
		case MPC7457:
			/* Only MPC745x CPUs have an L3 cache. */
			reg = mpc745x_l3_enable(bsp_state[3]);
			/* FALLTHROUGH */
		default:
			break;
		}

		reg = mpc74xx_l2_enable(bsp_state[2]);
		reg = mpc74xx_l1d_enable();
		reg = mpc74xx_l1i_enable();

		break;
	case IBMPOWER7:
	case IBMPOWER7PLUS:
	case IBMPOWER8:
	case IBMPOWER8E:
	case IBMPOWER8NVL:
	case IBMPOWER9:
#ifdef __powerpc64__
		if (mfmsr() & PSL_HV) {
			mtspr(SPR_LPCR, mfspr(SPR_LPCR) | lpcr |
			    LPCR_PECE_WAKESET);
			isync();
		}
#endif
		break;
	default:
#ifdef __powerpc64__
		if (!(mfmsr() & PSL_HV)) /* Rely on HV to have set things up */
			break;
#endif
		printf("WARNING: Unknown CPU type. Cache performance may be "
		    "suboptimal.\n");
		break;
	}
}