/*	$OpenBSD: cpu.h,v 1.78 2024/06/22 10:22:29 jsg Exp $	*/
/*	$NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $	*/

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_POWERPC_CPU_H_
#define	_POWERPC_CPU_H_

#include <machine/frame.h>

#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_info {
	struct device *ci_dev;		/* our device */
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	struct proc *ci_curproc;

	struct pcb *ci_curpcb;
	struct pmap *ci_curpm;
	struct proc *ci_fpuproc;
	struct proc *ci_vecproc;
	int ci_cpuid;

	volatile int ci_want_resched;
	volatile int ci_cpl;
	volatile int ci_ipending;
	volatile int ci_dec_deferred;

	volatile int	ci_flags;
#define	CI_FLAGS_SLEEPING		2

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	int ci_intrdepth;
	char *ci_intstk;
#define CPUSAVE_LEN	8
	register_t ci_tempsave[CPUSAVE_LEN];
	register_t ci_ddbsave[CPUSAVE_LEN];
#define DISISAVE_LEN	4
	register_t ci_disisave[DISISAVE_LEN];

	struct clockqueue ci_queue;

	volatile int	ci_ddb_paused;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

	u_int32_t ci_randseed;

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
	struct clockintr ci_gmonclock;
#endif
	char ci_panicbuf[512];
};

/*
 * curcpu() reads SPRG0, which the kernel keeps pointing at the
 * current CPU's cpu_info.
 */
static __inline struct cpu_info *
curcpu(void)
{
	struct cpu_info *ci;

	__asm volatile ("mfsprg %0,0" : "=r"(ci));
	return ci;
}
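
/*
 * Illustrative use (not part of this header's API): most per-CPU
 * state is reached through curcpu(), e.g.
 *
 *	struct cpu_info *ci = curcpu();
 *	if (ci->ci_want_resched)
 *		...
 */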

#define	curpcb			(curcpu()->ci_curpcb)
#define	curpm			(curcpu()->ci_curpm)

#define CPU_INFO_UNIT(ci)	((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

#ifdef MULTIPROCESSOR

#define PPC_MAXPROCS		4

static __inline int
cpu_number(void)
{
	int pir;

	pir = curcpu()->ci_cpuid;
	return pir;
}

void	cpu_boot_secondary_processors(void);

#define CPU_IS_PRIMARY(ci)	((ci)->ci_cpuid == 0)
#define CPU_IS_RUNNING(ci)	1
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = &cpu_info[0]; cii < ncpusfound; cii++, ci++)

void cpu_unidle(struct cpu_info *);

#else

#define PPC_MAXPROCS		1

#define cpu_number()		0

#define CPU_IS_PRIMARY(ci)	1
#define CPU_IS_RUNNING(ci)	1
#define CPU_INFO_ITERATOR		int
#define CPU_INFO_FOREACH(cii, ci)					\
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)

#define cpu_unidle(ci)

#endif

#define CPU_BUSY_CYCLE()	do {} while (0)

#define MAXCPUS	PPC_MAXPROCS

extern struct cpu_info cpu_info[PPC_MAXPROCS];

#define	CLKF_USERMODE(frame)	(((frame)->srr1 & PSL_PR) != 0)
#define	CLKF_PC(frame)		((frame)->srr0)
#define	CLKF_INTR(frame)	((frame)->depth != 0)

extern int ppc_cpuidle;
extern int ppc_proc_is_64b;
extern int ppc_nobat;

void	cpu_bootstrap(void);

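/*
 * cpu_rnd_messybits() returns a cheap, weakly unpredictable value by
 * XORing the two halves of the timebase. It is meant as a source of
 * timing jitter for the entropy machinery, not as a random number
 * generator.
 */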
static inline unsigned int
cpu_rnd_messybits(void)
{
	unsigned int hi, lo;

	__asm volatile("mftbu %0; mftb %1" : "=r" (hi), "=r" (lo));

	return (hi ^ lo);
}

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)		(trapframe(p)->srr0)
#define	PROC_STACK(p)		(trapframe(p)->fixreg[1])

void	delay(unsigned);
#define	DELAY(n)		delay(n)

#define	aston(p)		((p)->p_md.md_astpending = 1)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci) \
do {									\
	ci->ci_want_resched = 1;					\
	if (ci->ci_curproc != NULL)					\
		aston(ci->ci_curproc);					\
} while (0)
#define clear_resched(ci) (ci)->ci_want_resched = 0

#define	need_proftick(p)	aston(p)

void	signotify(struct proc *);

extern char *bootpath;

#ifndef	CACHELINESIZE
#define	CACHELINESIZE	32			/* For now		XXX */
#endif

/*
 * Write back the data cache and invalidate the instruction cache
 * over [from, from + len), so newly written instructions become
 * visible to instruction fetch.
 */
static __inline void
syncicache(void *from, size_t len)
{
	size_t	by, i;

	by = CACHELINESIZE;
	i = 0;
	do {
		__asm volatile ("dcbst %0,%1" :: "r"(from), "r"(i));
		i += by;
	} while (i < len);
	__asm volatile ("sync");
	i = 0;
	do {
		__asm volatile ("icbi %0,%1" :: "r"(from), "r"(i));
		i += by;
	} while (i < len);
	__asm volatile ("sync; isync");
}
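
/*
 * Illustrative use (hypothetical buffer names): after copying or
 * patching instructions, make them fetchable before jumping to them:
 *
 *	memcpy(trampoline, code, codelen);
 *	syncicache(trampoline, codelen);
 */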

/*
 * Invalidate (discard, without writing back) the data cache lines
 * covering [from, from + len).
 */
static __inline void
invdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbi 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}

/*
 * Flush (write back, then invalidate) the data cache lines covering
 * [from, from + len).
 */
static __inline void
flushdcache(void *from, int len)
{
	int l;
	char *p = from;

	len = len + (((u_int32_t) from) & (CACHELINESIZE - 1));
	l = len;

	do {
		__asm volatile ("dcbf 0,%0" :: "r"(p));
		p += CACHELINESIZE;
	} while ((l -= CACHELINESIZE) > 0);
	__asm volatile ("sync");
}
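
/*
 * Rule of thumb (general PowerPC practice, not specific to this file):
 * flushdcache() before a device reads from memory, so dirty lines are
 * written back; invdcache() before the CPU reads memory a device has
 * just written, discarding stale lines (and any dirty data in them).
 */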

/*
 * FUNC_SPR(n, name) defines ppc_mf<name>() and ppc_mt<name>() inlines
 * that read and write special purpose register n.
 */
#define FUNC_SPR(n, name) \
static __inline u_int32_t ppc_mf ## name(void)			\
{								\
	u_int32_t ret;						\
	__asm volatile ("mfspr %0," # n : "=r" (ret));		\
	return ret;						\
}								\
static __inline void ppc_mt ## name(u_int32_t val)		\
{								\
	__asm volatile ("mtspr "# n ",%0" :: "r" (val));	\
}

FUNC_SPR(0, mq)
FUNC_SPR(1, xer)
FUNC_SPR(4, rtcu)
FUNC_SPR(5, rtcl)
FUNC_SPR(8, lr)
FUNC_SPR(9, ctr)
FUNC_SPR(18, dsisr)
FUNC_SPR(19, dar)
FUNC_SPR(22, dec)
FUNC_SPR(25, sdr1)
FUNC_SPR(26, srr0)
FUNC_SPR(27, srr1)
FUNC_SPR(256, vrsave)
FUNC_SPR(272, sprg0)
FUNC_SPR(273, sprg1)
FUNC_SPR(274, sprg2)
FUNC_SPR(275, sprg3)
FUNC_SPR(280, asr)
FUNC_SPR(282, ear)
FUNC_SPR(287, pvr)
FUNC_SPR(311, hior)
FUNC_SPR(528, ibat0u)
FUNC_SPR(529, ibat0l)
FUNC_SPR(530, ibat1u)
FUNC_SPR(531, ibat1l)
FUNC_SPR(532, ibat2u)
FUNC_SPR(533, ibat2l)
FUNC_SPR(534, ibat3u)
FUNC_SPR(535, ibat3l)
FUNC_SPR(560, ibat4u)
FUNC_SPR(561, ibat4l)
FUNC_SPR(562, ibat5u)
FUNC_SPR(563, ibat5l)
FUNC_SPR(564, ibat6u)
FUNC_SPR(565, ibat6l)
FUNC_SPR(566, ibat7u)
FUNC_SPR(567, ibat7l)
FUNC_SPR(536, dbat0u)
FUNC_SPR(537, dbat0l)
FUNC_SPR(538, dbat1u)
FUNC_SPR(539, dbat1l)
FUNC_SPR(540, dbat2u)
FUNC_SPR(541, dbat2l)
FUNC_SPR(542, dbat3u)
FUNC_SPR(543, dbat3l)
FUNC_SPR(568, dbat4u)
FUNC_SPR(569, dbat4l)
FUNC_SPR(570, dbat5u)
FUNC_SPR(571, dbat5l)
FUNC_SPR(572, dbat6u)
FUNC_SPR(573, dbat6l)
FUNC_SPR(574, dbat7u)
FUNC_SPR(575, dbat7l)
FUNC_SPR(1009, hid1)
FUNC_SPR(1010, iabr)
FUNC_SPR(1017, l2cr)
FUNC_SPR(1018, l3cr)
FUNC_SPR(1013, dabr)
FUNC_SPR(1023, pir)
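
/*
 * Example (illustrative): the processor version lives in the upper
 * half of the PVR, so the generated accessor can identify the CPU
 * against the PPC_CPU_* constants defined later in this file:
 *
 *	switch (ppc_mfpvr() >> 16) {
 *	case PPC_CPU_MPC7450:
 *		...
 *	}
 */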

/* Read the lower 32 bits of the timebase. */
static __inline u_int32_t
ppc_mftbl(void)
{
	u_int32_t ret;
	__asm volatile ("mftb %0" : "=r" (ret));
	return ret;
}

/*
 * Read the full 64-bit timebase. The mftbu/mftb/mftbu sequence
 * retries if the upper half changed while the lower half was being
 * read, so a carry between the two reads cannot be observed.
 */
static __inline u_int64_t
ppc_mftb(void)
{
	u_long scratch;
	u_int64_t tb;

	__asm volatile ("1: mftbu %0; mftb %L0; mftbu %1;"
	    " cmpw 0,%0,%1; bne 1b" : "=r"(tb), "=r"(scratch));
	return tb;
}
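
/*
 * Illustrative use (hypothetical names): measuring an interval in
 * timebase ticks; converting to time needs the timebase frequency.
 *
 *	u_int64_t t0 = ppc_mftb();
 *	do_work();
 *	u_int64_t ticks = ppc_mftb() - t0;
 */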

static __inline void
ppc_mttb(u_int64_t tb)
{
	/* Zero TBL first so no carry into TBU can occur mid-update. */
	__asm volatile ("mttbl %0" :: "r"(0));
	__asm volatile ("mttbu %0" :: "r"((u_int32_t)(tb >> 32)));
	__asm volatile ("mttbl %0" :: "r"((u_int32_t)(tb & 0xffffffff)));
}

static __inline u_int32_t
ppc_mfmsr(void)
{
	u_int32_t ret;
	__asm volatile ("mfmsr %0" : "=r" (ret));
	return ret;
}

static __inline void
ppc_mtmsr(u_int32_t val)
{
	__asm volatile ("mtmsr %0" :: "r" (val));
}

static __inline void
ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
{
	__asm volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted));
}

u_int64_t ppc64_mfscomc(void);
void ppc_mtscomc(u_int32_t);
void ppc64_mtscomc(u_int64_t);
u_int64_t ppc64_mfscomd(void);
void ppc_mtscomd(u_int32_t);
u_int32_t ppc_mfhid0(void);
void ppc_mthid0(u_int32_t);
u_int64_t ppc64_mfhid1(void);
void ppc64_mthid1(u_int64_t);
u_int64_t ppc64_mfhid4(void);
void ppc64_mthid4(u_int64_t);
u_int64_t ppc64_mfhid5(void);
void ppc64_mthid5(u_int64_t);

#include <machine/psl.h>

/*
 * Generic functions to enable and disable interrupts, so that callers
 * need not embed inline assembly themselves.
 */
static __inline void
ppc_intr_enable(int enable)
{
	u_int32_t msr;
	if (enable != 0) {
		msr = ppc_mfmsr();
		msr |= PSL_EE;
		ppc_mtmsr(msr);
	}
}

static __inline int
ppc_intr_disable(void)
{
	u_int32_t emsr, dmsr;
	emsr = ppc_mfmsr();
	dmsr = emsr & ~PSL_EE;
	ppc_mtmsr(dmsr);
	return (emsr & PSL_EE);
}

static inline void
intr_enable(void)
{
	ppc_mtmsr(ppc_mfmsr() | PSL_EE);
}

static __inline u_long
intr_disable(void)
{
	return ppc_intr_disable();
}

static __inline void
intr_restore(u_long s)
{
	ppc_intr_enable(s);
}
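
/*
 * Typical critical section (illustrative):
 *
 *	u_long s = intr_disable();
 *	... modify state an interrupt handler may also touch ...
 *	intr_restore(s);
 *
 * intr_restore() re-enables external interrupts only if they were
 * enabled when intr_disable() was called.
 */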

int ppc_cpuspeed(int *);

/*
 * PowerPC CPU types
 */
#define	PPC_CPU_MPC601		1
#define	PPC_CPU_MPC603		3
#define	PPC_CPU_MPC604		4
#define	PPC_CPU_MPC603e		6
#define	PPC_CPU_MPC603ev	7
#define	PPC_CPU_MPC750		8
#define	PPC_CPU_MPC604ev	9
#define	PPC_CPU_MPC7400		12
#define	PPC_CPU_IBM970		0x0039
#define	PPC_CPU_IBM970FX	0x003c
#define	PPC_CPU_IBM970MP	0x0044
#define	PPC_CPU_IBM750FX	0x7000
#define	PPC_CPU_MPC7410		0x800c
#define	PPC_CPU_MPC7447A	0x8003
#define	PPC_CPU_MPC7448		0x8004
#define	PPC_CPU_MPC7450		0x8000
#define	PPC_CPU_MPC7455		0x8001
#define	PPC_CPU_MPC7457		0x8002
#define	PPC_CPU_MPC83xx		0x8083

/*
 * This needs to be included late since it relies on definitions higher
 * up in this file.
 */
#if defined(MULTIPROCESSOR) && defined(_KERNEL)
#include <sys/mplock.h>
#endif

#endif	/* _POWERPC_CPU_H_ */