/*	$NetBSD: psl.h,v 1.47 2011/07/16 11:15:52 nakayama Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)psl.h	8.1 (Berkeley) 6/11/93
 */

#ifndef PSR_IMPL

/*
 * SPARC Process Status Register (in psl.h for hysterical raisins).  This
 * doesn't exist on the V9.
 *
 * The picture in the Sun manuals looks like this:
 *	                                     1 1
 *	 31   28 27   24 23   20 19       14 3 2 11    8 7 6 5 4       0
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 *	|  impl |  ver  |  icc  |  reserved |E|E|  pil  |S|P|E|   CWP   |
 *	|       |       |n z v c|           |C|F|       | |S|T|         |
 *	+-------+-------+-------+-----------+-+-+-------+-+-+-+---------+
 */

#define PSR_IMPL	0xf0000000	/* implementation */
#define PSR_VER		0x0f000000	/* version */
#define PSR_ICC		0x00f00000	/* integer condition codes */
#define PSR_N		0x00800000	/* negative */
#define PSR_Z		0x00400000	/* zero */
#define PSR_O		0x00200000	/* overflow */
#define PSR_C		0x00100000	/* carry */
#define PSR_EC		0x00002000	/* coprocessor enable */
#define PSR_EF		0x00001000	/* FP enable */
#define PSR_PIL		0x00000f00	/* interrupt level */
#define PSR_S		0x00000080	/* supervisor (kernel) mode */
#define PSR_PS		0x00000040	/* previous supervisor mode (traps) */
#define PSR_ET		0x00000020	/* trap enable */
#define PSR_CWP		0x0000001f	/* current window pointer */

#define PSR_BITS "\20\16EC\15EF\10S\7PS\6ET"
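
/*
 * Usage sketch (illustrative, not part of the original header): pulling
 * the individual fields out of a PSR value with the masks above, using
 * getpsr() defined later in this file:
 *
 *	int psr = getpsr();
 *	int pil = (psr & PSR_PIL) >> 8;		(interrupt priority level)
 *	int cwp = psr & PSR_CWP;		(current window pointer)
 */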

/*
 * SPARC V9 CCR register
 */

#define ICC_C	0x01L
#define ICC_V	0x02L
#define ICC_Z	0x04L
#define ICC_N	0x08L
#define XCC_SHIFT	4
#define XCC_C	(ICC_C<<XCC_SHIFT)
#define XCC_V	(ICC_V<<XCC_SHIFT)
#define XCC_Z	(ICC_Z<<XCC_SHIFT)
#define XCC_N	(ICC_N<<XCC_SHIFT)
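
/*
 * Worked example (illustrative): icc sits in CCR bits 0-3 and xcc in
 * bits 4-7, so a result that is zero in both the 32-bit and 64-bit
 * views reads as:
 *
 *	ICC_Z | XCC_Z == 0x04 | 0x40 == 0x44
 */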

/*
 * SPARC V9 PSTATE register (what replaces the PSR in V9)
 *
 * Here's the layout:
 *
 *    11   10    9     8   7  6   5     4     3     2     1   0
 *  +------------------------------------------------------------+
 *  | IG | MG | CLE | TLE | MM | RED | PEF | AM | PRIV | IE | AG |
 *  +------------------------------------------------------------+
 */

#define PSTATE_IG	0x800	/* enable spitfire interrupt globals */
#define PSTATE_MG	0x400	/* enable spitfire MMU globals */
#define PSTATE_CLE	0x200	/* current little endian */
#define PSTATE_TLE	0x100	/* traps little endian */
#define PSTATE_MM	0x0c0	/* memory model */
#define PSTATE_MM_TSO	0x000	/* total store order */
#define PSTATE_MM_PSO	0x040	/* partial store order */
#define PSTATE_MM_RMO	0x080	/* relaxed memory order */
#define PSTATE_RED	0x020	/* RED state */
#define PSTATE_PEF	0x010	/* enable floating point */
#define PSTATE_AM	0x008	/* 32-bit address masking */
#define PSTATE_PRIV	0x004	/* privileged mode */
#define PSTATE_IE	0x002	/* interrupt enable */
#define PSTATE_AG	0x001	/* enable alternate globals */

#define PSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"
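
/*
 * The *_BITS strings are in the kernel's snprintb(9)/"%b" format, so a
 * PSTATE value can be rendered symbolically.  Illustrative sketch:
 *
 *	char bits[64];
 *	snprintb(bits, sizeof(bits), PSTATE_BITS, pstate);
 *	printf("pstate=%s\n", bits);
 */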

/*
 * 32-bit code requires TSO, or at weakest PSO, since those are the memory
 * models supported on SPARC V8 and earlier machines.
 *
 * 64-bit code sets the memory model in the ELF header.
 *
 * We're running kernel code in TSO for the moment so we don't need to worry
 * about possible memory barrier bugs.
 */

#ifdef __arch64__
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_RMO|PSTATE_IE)
#else
#define PSTATE_PROM	(PSTATE_MM_TSO|PSTATE_PRIV)
#define PSTATE_NUCLEUS	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV|PSTATE_AG)
#define PSTATE_KERN	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_PRIV)
#define PSTATE_INTR	(PSTATE_KERN|PSTATE_IE)
#define PSTATE_USER32	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#define PSTATE_USER	(PSTATE_MM_TSO|PSTATE_AM|PSTATE_IE)
#endif
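
/*
 * Worked example (illustrative): on a 64-bit kernel the definitions
 * above reduce to the constants
 *
 *	PSTATE_KERN == PSTATE_MM_TSO|PSTATE_PRIV	== 0x004
 *	PSTATE_INTR == PSTATE_KERN|PSTATE_IE		== 0x006
 *	PSTATE_USER == PSTATE_MM_RMO|PSTATE_IE		== 0x082
 */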

/*
 * SPARC V9 TSTATE register
 *
 *   39 32 31 24 23 20  19   8	7 5 4   0
 *  +-----+-----+-----+--------+---+-----+
 *  | CCR | ASI |  -  | PSTATE | - | CWP |
 *  +-----+-----+-----+--------+---+-----+
 */

#define TSTATE_CWP		0x01f
#define TSTATE_PSTATE		0x6ff00
#define TSTATE_PSTATE_SHIFT	8
#define TSTATE_ASI		0xff000000LL
#define TSTATE_ASI_SHIFT	24
#define TSTATE_CCR		0xff00000000LL
#define TSTATE_CCR_SHIFT	32

#define PSRCC_TO_TSTATE(x)	(((int64_t)(x)&PSR_ICC)<<(TSTATE_CCR_SHIFT-20))
#define TSTATECCR_TO_PSR(x)	(((x)&TSTATE_CCR)>>(TSTATE_CCR_SHIFT-20))
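
/*
 * Worked example (illustrative): PSR_ICC occupies PSR bits 20-23 and
 * the icc half of CCR occupies TSTATE bits 32-35, hence the shift by
 * TSTATE_CCR_SHIFT - 20 == 12.  With only the zero flag set:
 *
 *	PSRCC_TO_TSTATE(PSR_Z) == 0x00400000LL << 12 == 0x400000000LL
 */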

/*
 * These are here to simplify life.
 */
#define TSTATE_IG	(PSTATE_IG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MG	(PSTATE_MG<<TSTATE_PSTATE_SHIFT)
#define TSTATE_CLE	(PSTATE_CLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_TLE	(PSTATE_TLE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM	(PSTATE_MM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_TSO	(PSTATE_MM_TSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_PSO	(PSTATE_MM_PSO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_MM_RMO	(PSTATE_MM_RMO<<TSTATE_PSTATE_SHIFT)
#define TSTATE_RED	(PSTATE_RED<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PEF	(PSTATE_PEF<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AM	(PSTATE_AM<<TSTATE_PSTATE_SHIFT)
#define TSTATE_PRIV	(PSTATE_PRIV<<TSTATE_PSTATE_SHIFT)
#define TSTATE_IE	(PSTATE_IE<<TSTATE_PSTATE_SHIFT)
#define TSTATE_AG	(PSTATE_AG<<TSTATE_PSTATE_SHIFT)

#define TSTATE_BITS "\20\14IG\13MG\12CLE\11TLE\10\7MM\6RED\5PEF\4AM\3PRIV\2IE\1AG"

#define TSTATE_KERN	((PSTATE_KERN)<<TSTATE_PSTATE_SHIFT)
#define TSTATE_USER	((PSTATE_USER)<<TSTATE_PSTATE_SHIFT)
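
/*
 * Sketch (illustrative): a complete TSTATE image for a return to
 * userland combines the pieces above, roughly:
 *
 *	tstate = TSTATE_USER | ((uint64_t)ccr << TSTATE_CCR_SHIFT) |
 *	    ((uint64_t)asi << TSTATE_ASI_SHIFT) | (cwp & TSTATE_CWP);
 */
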
/*
 * SPARC V9 VER version register.
 *
 *  63   48 47  32 31  24 23 16 15    8 7 5 4      0
 * +-------+------+------+-----+-------+---+--------+
 * | manuf | impl | mask |  -  | maxtl | - | maxwin |
 * +-------+------+------+-----+-------+---+--------+
 *
 */

#define VER_MANUF	0xffff000000000000LL
#define VER_MANUF_SHIFT	48
#define VER_IMPL	0x0000ffff00000000LL
#define VER_IMPL_SHIFT	32
#define VER_MASK	0x00000000ff000000LL
#define VER_MASK_SHIFT	24
#define VER_MAXTL	0x000000000000ff00LL
#define VER_MAXTL_SHIFT	8
#define VER_MAXWIN	0x000000000000001fLL
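
/*
 * Sketch (illustrative): VER is read with "rdpr %ver" and decoded with
 * the masks above, e.g. to find how many trap levels and register
 * windows the CPU implements:
 *
 *	uint64_t ver;
 *	__asm volatile("rdpr %%ver,%0" : "=r" (ver));
 *	int maxtl = (ver & VER_MAXTL) >> VER_MAXTL_SHIFT;
 *	int nwin  = (ver & VER_MAXWIN) + 1;	(maxwin holds NWINDOWS - 1)
 */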

/*
 * Here are a few things to help us transition between user and kernel mode:
 */

/* Memory models */
#define KERN_MM		PSTATE_MM_TSO
#define USER_MM		PSTATE_MM_RMO

/*
 * Register window handlers.  These point to generic routines that check the
 * stack pointer and then vector to the real handler.  We could optimize this
 * if we could guarantee only 32-bit or 64-bit stacks.
 */
#define WSTATE_KERN	026
#define WSTATE_USER	022

#define CWP		0x01f

/* 64-byte alignment -- this seems the best place to put this. */
#define SPARC64_BLOCK_SIZE	64
#define SPARC64_BLOCK_ALIGN	0x3f
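
/*
 * Example (illustrative): rounding an address up to the next 64-byte
 * block boundary with the definitions above:
 *
 *	aligned = (addr + SPARC64_BLOCK_ALIGN) & ~(u_long)SPARC64_BLOCK_ALIGN;
 */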

#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * GCC pseudo-functions for manipulating PSR (primarily PIL field).
 */
static __inline __attribute__((__always_inline__)) int
getpsr(void)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr));
	return (psr);
}

static __inline __attribute__((__always_inline__)) int
getmid(void)
{
	int mid;

	__asm volatile("rd %%tbr,%0" : "=r" (mid));
	return ((mid >> 20) & 0x3);
}

static __inline __attribute__((__always_inline__)) void
setpsr(int newpsr)
{
	__asm volatile("wr %0,0,%%psr" : : "r" (newpsr) : "memory");
	__asm volatile("nop; nop; nop");
}

static __inline __attribute__((__always_inline__)) void
spl0(void)
{
	int psr, oldipl;

	/*
	 * wrpsr xors two values: we choose old psr and old ipl here,
	 * which gives us the same value as the old psr but with all
	 * the old PIL bits turned off.
	 */
	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	oldipl = psr & PSR_PIL;
	__asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));

	/*
	 * Three instructions must execute before we can depend
	 * on the bits to be changed.
	 */
	__asm volatile("nop; nop; nop");
}

/*
 * PIL 1 through 14 can use this macro.
 * (spl0 and splhigh are special since they put all 0s or all 1s
 * into the ipl field.)
 */
#define	_SPLSET(name, newipl) \
static __inline __attribute__((__always_inline__)) void name(void) \
{ \
	int psr; \
	__asm volatile("rd %%psr,%0" : "=r" (psr)); \
	psr &= ~PSR_PIL; \
	__asm volatile("wr %0,%1,%%psr" : : \
	    "r" (psr), "n" ((newipl) << 8)); \
	__asm volatile("nop; nop; nop" : : : "memory"); \
}
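
/*
 * For reference (illustrative): since wrpsr xors its operands and the
 * macro body clears PSR_PIL first, the net effect of a generated
 * function is simply
 *
 *	psr = (psr & ~PSR_PIL) | ((newipl) << 8);
 *
 * so _SPLSET(spllowerschedclock, IPL_SCHED) below yields a function
 * that unconditionally sets the PIL to IPL_SCHED.
 */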

_SPLSET(spllowerschedclock, IPL_SCHED)

typedef uint8_t ipl_t;
typedef struct {
	ipl_t _ipl;
} ipl_cookie_t;

static inline ipl_cookie_t
makeiplcookie(ipl_t ipl)
{

	return (ipl_cookie_t){._ipl = ipl};
}

/* Raise IPL and return previous value */
static __inline int
splraiseipl(ipl_cookie_t icookie)
{
	int newipl = icookie._ipl;
	int psr, oldipl;

	__asm volatile("rd %%psr,%0" : "=r" (psr));

	oldipl = psr & PSR_PIL;
	newipl <<= 8;
	if (newipl <= oldipl)
		return (oldipl);

	psr = (psr & ~oldipl) | newipl;

	__asm volatile("wr %0,0,%%psr" : : "r" (psr));
	__asm volatile("nop; nop; nop" : : : "memory");

	return (oldipl);
}
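
/*
 * Typical usage (illustrative): raise the IPL around a critical section
 * and restore the previous level with splx() afterwards:
 *
 *	int s = splraiseipl(makeiplcookie(IPL_ZS));
 *	... touch interrupt-sensitive state ...
 *	splx(s);
 */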

#include <sys/spl.h>

#define	splausoft()	splraiseipl(makeiplcookie(IPL_SOFTAUDIO))
#define	splfdsoft()	splraiseipl(makeiplcookie(IPL_SOFTFDC))

#define	splfd()		splraiseipl(makeiplcookie(IPL_FD))
#define	splts102()	splraiseipl(makeiplcookie(IPL_TS102))

#define	splzs()		splraiseipl(makeiplcookie(IPL_ZS))

/* splx does not have a return value */
static __inline __attribute__((__always_inline__)) void
splx(int newipl)
{
	int psr;

	__asm volatile("rd %%psr,%0" : "=r" (psr) : : "memory");
	__asm volatile("wr %0,%1,%%psr" : :
	    "r" (psr & ~PSR_PIL), "rn" (newipl));
	__asm volatile("nop; nop; nop");
}
#endif /* _KERNEL && !_LOCORE */

#endif /* PSR_IMPL */