/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _PPC_PROC_REG_H_
#define _PPC_PROC_REG_H_

#include <mach/boolean.h>

/* Define some useful masks that convert from bit numbers */

#if __PPC__
#ifdef __BIG_ENDIAN__
#ifndef ENDIAN_MASK
#define ENDIAN_MASK(val,size) (1 << (((size) - 1) - (val)))
#endif
#else
#error code not ported to little endian targets yet
#endif /* __BIG_ENDIAN__ */
#endif /* __PPC__ */

#define MASK32(PART)	ENDIAN_MASK(PART ## _BIT, 32)
#define MASK16(PART)	ENDIAN_MASK(PART ## _BIT, 16)
#define MASK8(PART)	ENDIAN_MASK(PART ## _BIT, 8)

#undef MASK
#define MASK(PART)	MASK32(PART)

#define BITS_PER_WORD	32
#define BITS_PER_WORD_POW2 5
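
/*
 * Illustration (editor's note, not in the original header): PowerPC
 * manuals number bits big-endian, with bit 0 the most significant, and
 * ENDIAN_MASK converts such a bit number into a C mask. For example,
 * MSR_EE_BIT below is 16, so:
 *
 *	MASK(MSR_EE) == ENDIAN_MASK(16, 32) == 1 << (31 - 16) == 0x00008000
 */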

/* Defines for decoding the MSR bits */

#define MSR_SF_BIT	0
#define MSR_HV_BIT	3
#define MSR_RES1_BIT	1
#define MSR_RES2_BIT	2
#define MSR_RES3_BIT	3
#define MSR_RES4_BIT	4
#define MSR_RES5_BIT	5
#define MSR_VEC_BIT	6
#define MSR_RES7_BIT	7
#define MSR_RES8_BIT	8
#define MSR_RES9_BIT	9
#define MSR_RES10_BIT	10
#define MSR_RES11_BIT	11
#define MSR_KEY_BIT	12	/* Key bit on 603e (not on 603) */
#define MSR_POW_BIT	13
#define MSR_TGPR_BIT	14	/* Temporary GPR mappings on 603/603e */
#define MSR_ILE_BIT	15
#define MSR_EE_BIT	16
#define MSR_PR_BIT	17
#define MSR_FP_BIT	18
#define MSR_ME_BIT	19
#define MSR_FE0_BIT	20
#define MSR_SE_BIT	21
#define MSR_BE_BIT	22
#define MSR_FE1_BIT	23
#define MSR_RES24_BIT	24	/* AL bit in the POWER architecture */
#define MSR_IP_BIT	25
#define MSR_IR_BIT	26
#define MSR_DR_BIT	27
#define MSR_RES28_BIT	28
#define MSR_PM_BIT	29
#define MSR_RI_BIT	30
#define MSR_LE_BIT	31

/* MSR for kernel mode, interrupts disabled, running in virtual mode */
#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR))

/* MSR for the above, but with interrupts enabled */
#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE))

/* MSR for physical mode code */
#define MSR_VM_OFF     (MASK(MSR_ME))

/* MSR for physical instruction, virtual data */
#define MSR_PHYS_INST_VIRT_DATA     (MASK(MSR_ME) | MASK(MSR_DR))
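
/*
 * Worked values (editor's note, not in the original header): with the
 * bit numbers above, MASK(MSR_ME) == 0x00001000, MASK(MSR_IR) ==
 * 0x00000020, MASK(MSR_DR) == 0x00000010 and MASK(MSR_EE) ==
 * 0x00008000, so:
 *
 *	MSR_SUPERVISOR_INT_OFF == 0x00001030
 *	MSR_SUPERVISOR_INT_ON  == 0x00009030
 */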

/* MSR mask for user-exported bits - identify bits that must be set/reset */

/* SET - external exceptions, machine check, vm on, user-level privs */
#define MSR_EXPORT_MASK_SET	(MASK(MSR_EE) | MASK(MSR_ME) | \
				 MASK(MSR_IR) | MASK(MSR_DR) | MASK(MSR_PR))

/* only the following bits may be changed by a task */
#define MSR_IMPORT_BITS (MASK(MSR_FE0) | MASK(MSR_SE) | MASK(MSR_BE) | \
			 MASK(MSR_FE1) | MASK(MSR_PM) | MASK(MSR_LE))

#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \
	(((origmsr) & ~MSR_IMPORT_BITS) | ((newmsr) & MSR_IMPORT_BITS))

#define MSR_VEC_ON	(MASK(MSR_VEC))

#define USER_MODE(msr) (((msr) & MASK(MSR_PR)) ? TRUE : FALSE)
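
/*
 * Sketch (editor's illustration; "user_msr" is a hypothetical
 * task-supplied MSR image): only the MSR_IMPORT_BITS (FE0, SE, BE,
 * FE1, PM, LE) are taken from the new value, so a task can never
 * grant itself supervisor state:
 *
 *	unsigned int safe = MSR_PREPARE_FOR_IMPORT(mfmsr(), user_msr);
 *	// MSR_PR is not importable, so USER_MODE(safe) == USER_MODE(mfmsr())
 */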

/* seg reg values must be simple expressions so that the assembler can cope */
#define SEG_REG_INVALID 0x0000
#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0, Ks=0, Ku=1, PPC_SID_KERNEL=0 */

/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */
#define SEG_REG_PROT	0x20000000   /* seg regs should have these bits set */
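
/*
 * Why 0x20000000 (editor's note, not in the original header): in a
 * segment register, big-endian bit 0 is T, bit 1 is Ks and bit 2 is
 * Ku, so the value above sets only the user-state protection key:
 *
 *	ENDIAN_MASK(2, 32) == 1 << (31 - 2) == 0x20000000
 */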

/* SR_COPYIN is used for copyin/copyout+remapping and must be
 * saved and restored in the thread context.
 */
/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains
 * the space ID of the currently interrupted user task immediately
 * after an exception and before interrupts are reenabled. It's used
 * purely for an assert.
 */

/* SR_KERNEL used for asserts... */

#define SR_COPYIN	sr14
#define SR_UNUSED_BY_KERN sr13
#define SR_KERNEL	sr0

#define SR_UNUSED_BY_KERN_NUM 13
#define SR_COPYIN_NAME	sr14
#define SR_COPYIN_NUM	14
#define BAT_INVALID 0


/* DSISR bits on data access exceptions */

#define DSISR_IO_BIT		0	/* NOT USED on 601 */
#define DSISR_HASH_BIT		1
#define DSISR_NOEX_BIT		3
#define DSISR_PROT_BIT		4
#define DSISR_IO_SPC_BIT	5
#define DSISR_WRITE_BIT		6
#define DSISR_WATCH_BIT		9
#define DSISR_EIO_BIT		11

#define dsiMiss				0x40000000
#define dsiMissb			1
#define dsiNoEx				0x10000000
#define dsiProt				0x08000000
#define dsiInvMode			0x04000000
#define dsiStore			0x02000000
#define dsiAC				0x00400000
#define dsiSeg				0x00200000
#define dsiValid			0x5E600000
#define dsiLinkage			0x00010000	/* Linkage mapping type - software flag */
#define dsiLinkageb			15			/* Linkage mapping type - software flag */
#define dsiSoftware			0x0000FFFF
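
/*
 * Consistency check (editor's note, not in the original header):
 * dsiValid is the OR of the hardware status bits defined above:
 *
 *	dsiMiss | dsiNoEx | dsiProt | dsiInvMode | dsiStore | dsiAC | dsiSeg
 *	    == 0x5E600000
 */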

/* SRR1 bits on data/instruction translation exceptions */

#define SRR1_TRANS_HASH_BIT	1
#define SRR1_TRANS_IO_BIT	3
#define SRR1_TRANS_PROT_BIT	4
#define SRR1_TRANS_NO_PTE_BIT	10

/* SRR1 bits on program exceptions */

#define SRR1_PRG_FE_BIT		11
#define SRR1_PRG_ILL_INS_BIT	12
#define SRR1_PRG_PRV_INS_BIT	13
#define SRR1_PRG_TRAP_BIT	14

/*
 * Virtual to physical mapping macros/structures.
 * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page.
 */

#define PTE1_WIMG_GUARD_BIT	28	/* Needed for assembler */
#define PTE1_REFERENCED_BIT	23	/* ditto */
#define PTE1_CHANGED_BIT	24
#define PTE0_HASH_ID_BIT	25

#define PTE_WIMG_CB_CACHED_COHERENT		0	/* cached, writeback, coherent (default) */
#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED	1	/* cached, writeback, coherent, guarded */
#define PTE_WIMG_UNCACHED_COHERENT		2	/* uncached, coherent */
#define PTE_WIMG_UNCACHED_COHERENT_GUARDED	3	/* uncached, coherent, guarded */

#define PTE_WIMG_DEFAULT	PTE_WIMG_CB_CACHED_COHERENT
#define PTE_WIMG_IO		PTE_WIMG_UNCACHED_COHERENT_GUARDED
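
/*
 * Usage note (editor's illustration, not in the original header):
 * ordinary memory mappings take PTE_WIMG_DEFAULT, while memory-mapped
 * device registers take PTE_WIMG_IO so that accesses stay uncached,
 * coherent and guarded, e.g. (hypothetical "is_device" flag):
 *
 *	unsigned int wimg = is_device ? PTE_WIMG_IO : PTE_WIMG_DEFAULT;
 */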


#ifndef ASSEMBLER
#ifdef __GNUC__

/* Structures and types for machine registers */


/*
 * C-helper inline functions for accessing machine registers follow.
 */


/*
 * Various memory/IO synchronisation instructions
 */

/*	Use eieio as a memory barrier to order stores.
 *	Useful for device control and PTE maintenance.
 */

#define eieio() \
        __asm__ volatile("eieio")
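
/*
 * Example (editor's illustration; "regs" is a hypothetical device
 * register block mapped with PTE_WIMG_IO):
 *
 *	regs->dma_addr = phys;	// program the transfer
 *	eieio();		// keep the stores ordered
 *	regs->dma_go = 1;	// device must see dma_addr first
 */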

/*	Use sync to ensure previous stores have completed.
 *	This is required when manipulating locks and/or
 *	maintaining PTEs or other shared structures on SMP
 *	machines.
 */

#define sync() \
        __asm__ volatile("sync")
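
/*
 * Example (editor's illustration; "shared" and "lock" are hypothetical
 * SMP data structures): publish protected stores before the lock is
 * seen as free.
 *
 *	shared->data = value;	// protected update
 *	sync();			// complete it first, system-wide
 *	lock->owner = 0;	// now release
 */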

/*	Use isync to synchronize context; that is, to discard any
 *	prefetched instructions and ensure that no instruction after
 *	the isync executes until all instructions before it have
 *	completed.
 */

#define isync() \
        __asm__ volatile("isync")


/*
 * Access to various system registers
 */

extern unsigned int mflr(void);

extern __inline__ unsigned int mflr(void)
{
        unsigned int result;
        __asm__ volatile("mflr %0" : "=r" (result));
        return result;
}

extern unsigned int mfpvr(void);

extern __inline__ unsigned int mfpvr(void)
{
        unsigned int result;
        __asm__ ("mfpvr %0" : "=r" (result));
        return result;
}

/* mtmsr may need sync/isync instructions around it, so no simple
 * inline macro is provided; a caller-side sketch follows.
 */
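
/*
 * Sketch (editor's illustration, not in the original header) of a
 * caller-side wrapper; exact synchronization requirements vary by
 * core, so treat this as an assumption, not a recipe:
 *
 *	static __inline__ void mtmsr_sync(unsigned int val)
 *	{
 *		__asm__ volatile("sync");	// drain pending stores
 *		__asm__ volatile("mtmsr %0" : : "r" (val));
 *		__asm__ volatile("isync");	// context-synchronize
 *	}
 */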

extern unsigned int mfmsr(void);

extern __inline__ unsigned int mfmsr(void)
{
        unsigned int result;
        __asm__ volatile("mfmsr %0" : "=r" (result));
        return result;
}


extern unsigned int mfdar(void);

extern __inline__ unsigned int mfdar(void)
{
        unsigned int result;
        __asm__ volatile("mfdar %0" : "=r" (result));
        return result;
}

extern void mtdec(unsigned int val);

extern __inline__ void mtdec(unsigned int val)
{
        __asm__ volatile("mtdec %0" : : "r" (val));
        return;
}

extern void mttb(unsigned int val);

extern __inline__ void mttb(unsigned int val)
{
        __asm__ volatile("mtspr tbl, %0" : : "r" (val));
        return;
}

extern unsigned int mftb(void);

extern __inline__ unsigned int mftb(void)
{
        unsigned int result;
        __asm__ volatile("mftb %0" : "=r" (result));
        return result;
}

extern void mttbu(unsigned int val);

extern __inline__ void mttbu(unsigned int val)
{
        __asm__ volatile("mtspr tbu, %0" : : "r" (val));
        return;
}

extern unsigned int mftbu(void);

extern __inline__ unsigned int mftbu(void)
{
        unsigned int result;
        __asm__ volatile("mftbu %0" : "=r" (result));
        return result;
}
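
/*
 * Sketch (editor's illustration, not in the original header): reading
 * the full 64-bit timebase with these helpers needs a retry loop,
 * since TBL may carry into TBU between the two reads:
 *
 *	unsigned int hi, lo, hi2;
 *	do {
 *		hi  = mftbu();
 *		lo  = mftb();
 *		hi2 = mftbu();
 *	} while (hi != hi2);
 *	// (hi, lo) is now a consistent 64-bit sample
 */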

extern unsigned int mfl2cr(void);

extern __inline__ unsigned int mfl2cr(void)
{
        unsigned int result;
        __asm__ volatile("mfspr %0, l2cr" : "=r" (result));
        return result;
}

extern unsigned int cntlzw(unsigned int num);

extern __inline__ unsigned int cntlzw(unsigned int num)
{
        unsigned int result;
        __asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num));
        return result;
}
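
/*
 * Example (editor's note, not in the original header): cntlzw counts
 * leading zero bits, so for x != 0 the index of the highest set bit,
 * i.e. floor(log2(x)), is (BITS_PER_WORD - 1) - cntlzw(x):
 *
 *	cntlzw(0x00010000) == 15, and 31 - 15 == 16 == log2(0x00010000)
 */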


/* functions for doing byte-reversed loads and stores */

extern unsigned int lwbrx(unsigned int addr);

extern __inline__ unsigned int lwbrx(unsigned int addr)
{
        unsigned int result;
        __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr));
        return result;
}

extern void stwbrx(unsigned int data, unsigned int addr);

extern __inline__ void stwbrx(unsigned int data, unsigned int addr)
{
        __asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr));
}
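
/*
 * Example (editor's illustration; "le_reg" is the hypothetical address
 * of a little-endian device register): lwbrx/stwbrx swap bytes on the
 * fly, so the CPU works with the value in native big-endian order.
 *
 *	unsigned int v = lwbrx(le_reg);	// little-endian load
 *	stwbrx(v + 1, le_reg);		// store back, re-swapped
 */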

/* Performance Monitor Register access routines */
extern unsigned long	mfmmcr0(void);
extern void		mtmmcr0(unsigned long);
extern unsigned long	mfmmcr1(void);
extern void		mtmmcr1(unsigned long);
extern unsigned long	mfmmcr2(void);
extern void		mtmmcr2(unsigned long);
extern unsigned long	mfpmc1(void);
extern void		mtpmc1(unsigned long);
extern unsigned long	mfpmc2(void);
extern void		mtpmc2(unsigned long);
extern unsigned long	mfpmc3(void);
extern void		mtpmc3(unsigned long);
extern unsigned long	mfpmc4(void);
extern void		mtpmc4(unsigned long);
extern unsigned long	mfsia(void);
extern unsigned long	mfsda(void);

/* macros, since the argument n must be a hard-coded constant */

#define mtsprg(n, reg)  __asm__ volatile("mtsprg  " # n ", %0" : : "r" (reg))
#define mfsprg(reg, n)  __asm__ volatile("mfsprg  %0, " # n : "=r" (reg))

#define mtspr(spr, val)  __asm__ volatile("mtspr  " # spr ", %0" : : "r" (val))
#define mfspr(reg, spr)  __asm__ volatile("mfspr  %0, " # spr : "=r" (reg))
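
/*
 * Usage sketch (editor's illustration, not in the original header):
 * the SPR/SPRG number is pasted into the instruction text by the
 * preprocessor, so it must be a literal constant, never a variable:
 *
 *	unsigned int hid0;
 *	mfspr(hid0, 1008);	// HID0 is SPR 1008 on 750/74xx cores
 *	mtsprg(0, hid0);	// stash the value in SPRG0
 */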

#endif /* __GNUC__ */
#endif /* !ASSEMBLER */

#endif /* _PPC_PROC_REG_H_ */