/*-
 * Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/msun/powerpc/fenv.h 140219 2005-01-14 07:09:23Z das $
 */

29219820Sjeff#ifndef	_FENV_H_
30219820Sjeff#define	_FENV_H_
31219820Sjeff
32219820Sjeff#include <sys/_types.h>
33219820Sjeff
34219820Sjefftypedef	__uint32_t	fenv_t;
35219820Sjefftypedef	__uint32_t	fexcept_t;
36219820Sjeff
/* Exception flags (FPSCR status-bit positions) */
#define	FE_INEXACT	0x02000000
#define	FE_DIVBYZERO	0x04000000
#define	FE_UNDERFLOW	0x08000000
#define	FE_OVERFLOW	0x10000000
#define	FE_INVALID	0x20000000	/* all types of invalid FP ops */

/*
 * The PowerPC architecture has extra invalid flags that indicate the
 * specific type of invalid operation occurred.  These flags may be
 * tested, set, and cleared---but not masked---separately.  All of
 * these bits are cleared when FE_INVALID is cleared, but only
 * FE_VXSOFT is set when FE_INVALID is explicitly set in software.
 */
#define	FE_VXCVI	0x00000100	/* invalid integer convert */
#define	FE_VXSQRT	0x00000200	/* square root of a negative */
#define	FE_VXSOFT	0x00000400	/* software-requested exception */
#define	FE_VXVC		0x00080000	/* ordered comparison involving NaN */
#define	FE_VXIMZ	0x00100000	/* inf * 0 */
#define	FE_VXZDZ	0x00200000	/* 0 / 0 */
#define	FE_VXIDI	0x00400000	/* inf / inf */
#define	FE_VXISI	0x00800000	/* inf - inf */
#define	FE_VXSNAN	0x01000000	/* operation on a signalling NaN */
#define	FE_ALL_INVALID	(FE_VXCVI | FE_VXSQRT | FE_VXSOFT | FE_VXVC | \
			 FE_VXIMZ | FE_VXZDZ | FE_VXIDI | FE_VXISI | \
			 FE_VXSNAN | FE_INVALID)
#define	FE_ALL_EXCEPT	(FE_DIVBYZERO | FE_INEXACT | \
			 FE_ALL_INVALID | FE_OVERFLOW | FE_UNDERFLOW)

/* Rounding modes (FPSCR[RN] encodings) */
#define	FE_TONEAREST	0x0000
#define	FE_TOWARDZERO	0x0001
#define	FE_UPWARD	0x0002
#define	FE_DOWNWARD	0x0003
#define	_ROUND_MASK	(FE_TONEAREST | FE_DOWNWARD | \
			 FE_UPWARD | FE_TOWARDZERO)

74__BEGIN_DECLS
75
76/* Default floating-point environment */
77extern const fenv_t	__fe_dfl_env;
78#define	FE_DFL_ENV	(&__fe_dfl_env)
79
80/* We need to be able to map status flag positions to mask flag positions */
81#define	_FPUSW_SHIFT	22
82#define	_ENABLE_MASK	((FE_DIVBYZERO | FE_INEXACT | FE_INVALID | \
83			 FE_OVERFLOW | FE_UNDERFLOW) >> _FPUSW_SHIFT)
84
85#define	__mffs(__env)	__asm __volatile("mffs %0" : "=f" (*(__env)))
86#define	__mtfsf(__env)	__asm __volatile("mtfsf 255,%0" : : "f" (__env))
87
88union __fpscr {
89	double __d;
90	struct {
91		__uint32_t __junk;
92		fenv_t __reg;
93	} __bits;
94};
95
96static __inline int
97feclearexcept(int __excepts)
98{
99	union __fpscr __r;
100
101	if (__excepts & FE_INVALID)
102		__excepts |= FE_ALL_INVALID;
103	__mffs(&__r.__d);
104	__r.__bits.__reg &= ~__excepts;
105	__mtfsf(__r.__d);
106	return (0);
107}
108
109static __inline int
110fegetexceptflag(fexcept_t *__flagp, int __excepts)
111{
112	union __fpscr __r;
113
114	__mffs(&__r.__d);
115	*__flagp = __r.__bits.__reg & __excepts;
116	return (0);
117}
118
119static __inline int
120fesetexceptflag(const fexcept_t *__flagp, int __excepts)
121{
122	union __fpscr __r;
123
124	if (__excepts & FE_INVALID)
125		__excepts |= FE_ALL_EXCEPT;
126	__mffs(&__r.__d);
127	__r.__bits.__reg &= ~__excepts;
128	__r.__bits.__reg |= *__flagp & __excepts;
129	__mtfsf(__r.__d);
130	return (0);
131}
132
133static __inline int
134feraiseexcept(int __excepts)
135{
136	union __fpscr __r;
137
138	if (__excepts & FE_INVALID)
139		__excepts |= FE_VXSOFT;
140	__mffs(&__r.__d);
141	__r.__bits.__reg |= __excepts;
142	__mtfsf(__r.__d);
143	return (0);
144}
145
146static __inline int
147fetestexcept(int __excepts)
148{
149	union __fpscr __r;
150
151	__mffs(&__r.__d);
152	return (__r.__bits.__reg & __excepts);
153}
154
155static __inline int
156fegetround(void)
157{
158	union __fpscr __r;
159
160	__mffs(&__r.__d);
161	return (__r.__bits.__reg & _ROUND_MASK);
162}
163
164static __inline int
165fesetround(int __round)
166{
167	union __fpscr __r;
168
169	if (__round & ~_ROUND_MASK)
170		return (-1);
171	__mffs(&__r.__d);
172	__r.__bits.__reg &= ~_ROUND_MASK;
173	__r.__bits.__reg |= __round;
174	__mtfsf(__r.__d);
175	return (0);
176}
177
178static __inline int
179fegetenv(fenv_t *__envp)
180{
181	union __fpscr __r;
182
183	__mffs(&__r.__d);
184	*__envp = __r.__bits.__reg;
185	return (0);
186}
187
188static __inline int
189feholdexcept(fenv_t *__envp)
190{
191	union __fpscr __r;
192
193	__mffs(&__r.__d);
194	*__envp = __r.__d;
195	__r.__bits.__reg &= ~(FE_ALL_EXCEPT | _ENABLE_MASK);
196	__mtfsf(__r.__d);
197	return (0);
198}
199
200static __inline int
201fesetenv(const fenv_t *__envp)
202{
203	union __fpscr __r;
204
205	__r.__bits.__reg = *__envp;
206	__mtfsf(__r.__d);
207	return (0);
208}
209
210static __inline int
211feupdateenv(const fenv_t *__envp)
212{
213	union __fpscr __r;
214
215	__mffs(&__r.__d);
216	__r.__bits.__reg &= FE_ALL_EXCEPT;
217	__r.__bits.__reg |= *__envp;
218	__mtfsf(__r.__d);
219	return (0);
220}
221
222#if __BSD_VISIBLE
223
224static __inline int
225fesetmask(int __mask)
226{
227	union __fpscr __r;
228	fenv_t __oldmask;
229
230	__mffs(&__r.__d);
231	__oldmask = __r.__bits.__reg;
232	__r.__bits.__reg &= ~_ENABLE_MASK;
233	__r.__bits.__reg |= __mask >> _FPUSW_SHIFT;
234	__mtfsf(__r.__d);
235	return ((__oldmask & _ENABLE_MASK) << _FPUSW_SHIFT);
236}
237
238static __inline int
239fegetmask(void)
240{
241	union __fpscr __r;
242
243	__mffs(&__r.__d);
244	return ((__r.__bits.__reg & _ENABLE_MASK) << _FPUSW_SHIFT);
245}
246
247#endif /* __BSD_VISIBLE */
248
249__END_DECLS
250
251#endif	/* !_FENV_H_ */
252