/*	$OpenBSD: fpu_explode.c,v 1.5 2004/09/28 18:03:36 otto Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_explode.c	8.1 (Berkeley) 6/11/93
 *	$NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/lib/libc/sparc64/fpu/fpu_explode.c,v 1.5 2002/05/11 21:20:04 jake Exp $");
#endif

/*
 * FPU subroutines: `explode' the machine's `packed binary' format numbers
 * into our internal format.
 */

#include <sys/param.h>

#include <machine/frame.h>
#include <machine/fsr.h>
#include <machine/ieee.h>
#include <machine/instr.h>

#include "fpu_arith.h"
#include "fpu_emu.h"
#include "fpu_extern.h"
#include "fpu_reg.h"

/*
 * N.B.: in all of the following, we assume the FP format is
 *
 *	---------------------------
 *	| s | exponent | fraction |
 *	---------------------------
 *
 * (which represents -1**s * 1.fraction * 2**exponent), so that the
 * sign bit is way at the top (bit 31), the exponent is next, and
 * then the remaining bits mark the fraction.  A zero exponent means
 * zero or denormalized (0.fraction rather than 1.fraction), and the
 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
 *
 * Since the sign bit is always the topmost bit---this holds even for
 * integers---we set that outside all the *tof functions.  Each function
 * returns the class code for the new number (but note that we use
 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
 */

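/*
 * Worked example (illustrative, not from the original source): the
 * single-precision bit pattern 0x40490fdb splits into sign 0, biased
 * exponent 0x80 (128) and fraction 0x490fdb, i.e.
 * (1 + 0x490fdb / 2^23) * 2^(128 - 127), which is approximately
 * 3.14159274 (pi).
 */
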
/*
 * int -> fpn.
 */
int
__fpu_itof(fp, i)
	struct fpn *fp;
	u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.  Convert negative
	 * numbers to sign-and-magnitude.  Note that this relies on
	 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = (int)i < 0 ? -i : i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}
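
/*
 * Illustrative example (not from the original source): __fpu_itof(fp, 5)
 * stores 5 in fp_mant[0] with fp_exp = FP_LG; __fpu_norm() then shifts
 * the mantissa up until the FP_1 bit is set and lowers fp_exp to 2,
 * leaving 1.01 (binary) * 2^2, i.e. 5, with class FPC_NUM.
 */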

/*
 * uint -> fpn.
 */
int
__fpu_uitof(fp, i)
	struct fpn *fp;
	u_int i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * The value FP_1 represents 2^FP_LG, so set the exponent
	 * there and let normalization fix it up.
	 * Note that this relies on fpu_norm()'s handling of
	 * `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG;
	fp->fp_mant[0] = i;
	fp->fp_mant[1] = 0;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * 64-bit int -> fpn.
 */
int
__fpu_xtof(fp, i)
	struct fpn *fp;
	u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * With the 64-bit value spanning fp_mant[0] and fp_mant[1],
	 * FP_1 represents 2^FP_LG2, so set the exponent there and let
	 * normalization fix it up.  Convert negative numbers to
	 * sign-and-magnitude.  Note that this relies on fpu_norm()'s
	 * handling of `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	i = ((int64_t)i < 0) ? -i : i;
	fp->fp_mant[0] = (i >> 32) & 0xffffffff;
	fp->fp_mant[1] = (i >> 0)  & 0xffffffff;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

/*
 * 64-bit uint -> fpn.
 */
int
__fpu_uxtof(fp, i)
	struct fpn *fp;
	u_int64_t i;
{

	if (i == 0)
		return (FPC_ZERO);
	/*
	 * With the 64-bit value spanning fp_mant[0] and fp_mant[1],
	 * FP_1 represents 2^FP_LG2, so set the exponent there and let
	 * normalization fix it up.
	 * Note that this relies on fpu_norm()'s handling of
	 * `supernormals'; see fpu_subr.c.
	 */
	fp->fp_exp = FP_LG2;
	fp->fp_mant[0] = (i >> 32) & 0xffffffff;
	fp->fp_mant[1] = (i >> 0)  & 0xffffffff;
	fp->fp_mant[2] = 0;
	fp->fp_mant[3] = 0;
	__fpu_norm(fp);
	return (FPC_NUM);
}

#define	mask(nbits) ((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
__fpu_stof(fp, i)
	struct fpn *fp;
	u_int i;
{
	int exp;
	u_int frac, f0, f1;
#define SNG_SHIFT (SNG_FRACBITS - FP_LG)

	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}
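
/*
 * Illustrative example (not from the original source): the denormal
 * single 0x00000001 has a zero exponent field and fraction 1, so
 * FP_TOF takes the denormal branch: fp_exp is set to 1 - SNG_EXP_BIAS
 * (-126) and __fpu_norm() shifts the lone fraction bit into place,
 * yielding 1.0 * 2^-149, the smallest positive single-precision value.
 */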

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
__fpu_dtof(fp, i, j)
	struct fpn *fp;
	u_int i, j;
{
	int exp;
	u_int frac, f0, f1, f2;
#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)

	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}

/*
 * 128-bit extended -> fpn.
 */
int
__fpu_qtof(fp, i, j, k, l)
	struct fpn *fp;
	u_int i, j, k, l;
{
	int exp;
	u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * Note that the ext fraction only needs the small EXT_SHIFT
	 * left shift to line up with the fpn format.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set.  (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
	struct fpemu *fe;
	struct fpn *fp;
	int type, reg;
{
	u_int32_t s = 0/* XXX gcc */, *sp;
	u_int64_t l[2];

	if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
		l[0] = __fpu_getreg64(reg & ~1);
		sp = (u_int32_t *)l;
		fp->fp_sign = sp[0] >> 31;
		fp->fp_sticky = 0;
		switch (type) {
			case FTYPE_LNG:
				s = __fpu_xtof(fp, l[0]);
				break;
			case FTYPE_DBL:
				s = __fpu_dtof(fp, sp[0], sp[1]);
				break;
			case FTYPE_EXT:
				l[1] = __fpu_getreg64((reg & ~1) + 2);
				s = __fpu_qtof(fp, sp[0], sp[1], sp[2], sp[3]);
				break;
			default:
#ifdef DIAGNOSTIC
				__utrap_panic("fpu_explode");
#endif
				break;
		}
	} else {
#ifdef DIAGNOSTIC
		if (type != FTYPE_SNG)
			__utrap_panic("fpu_explode");
#endif
		s = __fpu_getreg32(reg);
		fp->fp_sign = s >> 31;
		fp->fp_sticky = 0;
		s = __fpu_stof(fp, s);
	}

	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
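		/*
		 * (For IEEE single precision, for example, 0x7f800001 is
		 * a signalling NaN and 0x7fc00000 a quiet NaN: the quiet
		 * bit is the most significant fraction bit.)
		 */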
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
			((type == FTYPE_SNG) ? 's' :
				((type == FTYPE_DBL) ? 'd' :
					((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}
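
/*
 * Usage sketch (illustrative only; it assumes the fe_f1/fe_f2 operand
 * fields of struct fpemu and the __fpu_execute() caller found elsewhere
 * in this emulator): a caller typically explodes both source registers
 * before doing any arithmetic, e.g.
 *
 *	__fpu_explode(fe, &fe->fe_f1, type, rs1);
 *	__fpu_explode(fe, &fe->fe_f2, type, rs2);
 *
 * and only then operates on the unpacked fpn values.
 */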
365