/* cpufunc.h revision 80709 */
1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/sparc64/include/cpufunc.h 80709 2001-07-31 06:05:05Z jake $
27 */
28
29#ifndef	_MACHINE_CPUFUNC_H_
30#define	_MACHINE_CPUFUNC_H_
31
32#include <machine/asi.h>
33#include <machine/pstate.h>
34
/*
 * membar operand macros for use in other macros when # is a special
 * character.  Keep these in sync with what the hardware expects.
 *
 * The membar instruction operand has two fields: an mmask (bits 0-3)
 * selecting ordering relations between earlier and later memory
 * operations, and a cmask (bits 4 and up) selecting completion
 * constraints.  The C_* and M_* values are bit numbers within their
 * respective field; CMASK_GEN()/MMASK_GEN() turn a bit number into the
 * corresponding operand bit, already shifted into place, so the named
 * constants below can be or'ed together to build a membar operand.
 */
#define	C_Lookaside	(0)
#define	C_MemIssue	(1)
#define	C_Sync		(2)
#define	M_LoadLoad	(0)
#define	M_StoreLoad	(1)
#define	M_LoadStore	(2)
#define	M_StoreStore	(3)

/* Bit offsets of the cmask and mmask fields within the membar operand. */
#define	CMASK_SHIFT	(4)
#define	MMASK_SHIFT	(0)

/* Convert a field-relative bit number into its operand mask bit. */
#define	CMASK_GEN(bit)	((1 << (bit)) << CMASK_SHIFT)
#define	MMASK_GEN(bit)	((1 << (bit)) << MMASK_SHIFT)

#define	Lookaside	CMASK_GEN(C_Lookaside)
#define	MemIssue	CMASK_GEN(C_MemIssue)
#define	Sync		CMASK_GEN(C_Sync)
#define	LoadLoad	MMASK_GEN(M_LoadLoad)
#define	StoreLoad	MMASK_GEN(M_StoreLoad)
#define	LoadStore	MMASK_GEN(M_LoadStore)
#define	StoreStore	MMASK_GEN(M_StoreStore)
60
/*
 * 32-bit atomic compare-and-swap in alternate address space "asi"
 * (which must be a compile-time constant -- hence the "n" constraint).
 * Compares the word at [rs1] with rs2; if equal, the word is replaced
 * with the low 32 bits of rd.  Evaluates to the previous memory
 * contents in either case.
 */
#define	casa(rs1, rs2, rd, asi) ({					\
	u_int __rd = (u_int32_t)(rd);					\
	__asm __volatile("casa [%1] %2, %3, %0"				\
	    : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2));		\
	__rd;								\
})
67
/*
 * 64-bit atomic compare-and-swap in alternate address space "asi"
 * (compile-time constant).  Compares the doubleword at [rs1] with rs2;
 * if equal, it is replaced with rd.  Evaluates to the previous memory
 * contents in either case.
 */
#define	casxa(rs1, rs2, rd, asi) ({					\
	u_long __rd = (u_int64_t)(rd);					\
	__asm __volatile("casxa [%1] %2, %3, %0"			\
	    : "+r" (__rd) : "r" (rs1), "n" (asi), "r" (rs2));		\
	__rd;								\
})
74
/*
 * Synchronize the instruction stream with earlier stores to the
 * doubleword containing va; required after modifying code before it
 * may be executed.
 */
#define	flush(va) do {							\
	__asm __volatile("flush %0" : : "r" (va));			\
} while (0)
78
/*
 * Load a 64-bit value from va in alternate address space "asi"
 * (compile-time constant) and evaluate to it.
 */
#define	ldxa(va, asi) ({						\
	u_long __r;							\
	__asm __volatile("ldxa [%1] %2, %0"				\
	    : "=r" (__r) : "r" (va), "n" (asi));			\
	__r;								\
})
85
/*
 * Store the 64-bit value val to va in alternate address space "asi"
 * (compile-time constant).
 */
#define	stxa(va, asi, val) do {						\
	__asm __volatile("stxa %0, [%1] %2"				\
	    : : "r" (val), "r" (va), "n" (asi));			\
} while (0)
90
/*
 * Memory barrier; mask is an or of the Lookaside/MemIssue/Sync and
 * LoadLoad/StoreLoad/LoadStore/StoreStore constants above and must be
 * a compile-time constant ("n" constraint).
 */
#define	membar(mask) do {						\
	__asm __volatile("membar %0" : : "n" (mask));			\
} while (0)
94
/*
 * Read the named (non-privileged) state register, e.g. rd(y) or
 * rd(tick); "name" is pasted into the asm, so it must be a bare
 * register name, not a string.
 */
#define	rd(name) ({							\
	u_int64_t __sr;							\
	__asm __volatile("rd %%" #name ", %0" : "=r" (__sr) :);		\
	__sr;								\
})
100
/*
 * Write the named state register.  The wr instruction xors its two
 * source operands, so the register receives val ^ xor; pass 0 for xor
 * to write val unchanged.  "rI" allows xor to be a register or a
 * 13-bit signed immediate.
 */
#define	wr(name, val, xor) do {						\
	__asm __volatile("wr %0, %1, %%" #name				\
	    : : "r" (val), "rI" (xor));					\
} while (0)
105
/*
 * Read the named privileged register, e.g. rdpr(pstate) or rdpr(tl).
 */
#define	rdpr(name) ({							\
	u_int64_t __pr;							\
	__asm __volatile("rdpr %%" #name", %0" : "=r" (__pr) :);	\
	__pr;								\
})
111
/*
 * Write the named privileged register.  Like wr(), the register
 * receives val ^ xor, which allows toggling individual bits by passing
 * the current value and the bits to flip.
 */
#define	wrpr(name, val, xor) do {					\
	__asm __volatile("wrpr %0, %1, %%" #name			\
	    : : "r" (val), "rI" (xor));					\
} while (0)
116
/*
 * Enter the debugger: "ta 1" raises software trap number 1, which is
 * used here as the breakpoint trap.
 */
static __inline void
breakpoint(void)
{
	__asm __volatile("ta 1");
}
122
123/*
124 * XXX use %pil for these.
125 */
126static __inline critical_t
127critical_enter(void)
128{
129	critical_t ie;
130
131	ie = rdpr(pstate);
132	if (ie & PSTATE_IE)
133		wrpr(pstate, ie, PSTATE_IE);
134	return (ie);
135}
136
137static __inline void
138critical_exit(critical_t ie)
139{
140
141	if (ie & PSTATE_IE)
142		wrpr(pstate, ie, 0);
143}
144
#if 0
#define	HAVE_INLINE_FFS
/*
 * See page 202 of the SPARC v9 Architecture Manual.
 *
 * Find-first-set via population count:
 *	neg	%3, %1		neg = -mask
 *	xnor	%3, %1, %2	tmp = ~(mask ^ -mask); for nonzero mask
 *				this sets exactly the bits at and below
 *				the lowest set bit, so its popcount is
 *				that bit's 1-based index
 *	popc	%2, %0		result = popcount(tmp)
 *	movrz	%3, %%g0, %0	if mask == 0, result = 0 (ffs(0) == 0)
 *
 * NOTE(review): compiled out with #if 0 -- presumably because popc is
 * slow or emulated on the target implementations; confirm before
 * enabling.
 */
static __inline int
ffs(int mask)
{
	int result;
	int neg;
	int tmp;

	__asm __volatile(
	"	neg	%3, %1 ;	"
	"	xnor	%3, %1, %2 ;	"
	"	popc	%2, %0 ;	"
	"	movrz	%3, %%g0, %0 ;	"
	: "=r" (result), "=r" (neg), "=r" (tmp) : "r" (mask));
	return (result);
}
#endif
166
167#endif /* !_MACHINE_CPUFUNC_H_ */
168