/*	$NetBSD: fpu_implode.c,v 1.12 2003/08/07 16:29:37 agc Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)fpu_implode.c	8.1 (Berkeley) 6/11/93
 */

/*
 * FPU subroutines: `implode' internal format numbers into the machine's
 * `packed binary' format.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: fpu_implode.c,v 1.12 2003/08/07 16:29:37 agc Exp $");

#if defined(_KERNEL_OPT)
#include "opt_sparc_arch.h"
#endif

#include <sys/types.h>
#include <sys/systm.h>

#include <machine/ieee.h>
#include <machine/instr.h>
#include <machine/reg.h>

#include <sparc/fpu/fpu_arith.h>
#include <sparc/fpu/fpu_emu.h>
#include <sparc/fpu/fpu_extern.h>

static int round(struct fpemu *, struct fpn *);
static int toinf(struct fpemu *, int);

/*
 * Round a number (algorithm from Motorola MC68882 manual, modified for
 * our internal format).  Set inexact exception if rounding is required.
 * Return true iff we rounded up.
 *
 * After rounding, we discard the guard and round bits by shifting right
 * 2 bits (a la fpu_shr(), but we do not bother with fp->fp_sticky).
 * This saves effort later.
 *
 * Note that we may leave the value 2.0 in fp->fp_mant; it is the caller's
 * responsibility to fix this if necessary.
 */
static int
round(struct fpemu *fe, struct fpn *fp)
{
	register u_int m0, m1, m2, m3;
	register int gr, s;

	m0 = fp->fp_mant[0];
	m1 = fp->fp_mant[1];
	m2 = fp->fp_mant[2];
	m3 = fp->fp_mant[3];
	gr = m3 & 3;
	s = fp->fp_sticky;
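	/*
	 * With FP_NG == 2, gr now holds the guard bit (bit 1) and the
	 * round bit (bit 0); s is nonzero iff bits below those were lost.
	 */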

	/* mant >>= FP_NG */
	m3 = (m3 >> FP_NG) | (m2 << (32 - FP_NG));
	m2 = (m2 >> FP_NG) | (m1 << (32 - FP_NG));
	m1 = (m1 >> FP_NG) | (m0 << (32 - FP_NG));
	m0 >>= FP_NG;

	if ((gr | s) == 0)	/* result is exact: no rounding needed */
		goto rounddown;

	fe->fe_cx |= FSR_NX;	/* inexact */

	/* Go to rounddown to round down; break to round up. */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	case FSR_RD_RN:
	default:
		/*
		 * Round only if guard is set (gr & 2).  If guard is set,
		 * but round & sticky both clear, then we want to round
		 * but have a tie, so round to even, i.e., add 1 iff odd.
		 */
		if ((gr & 2) == 0)
			goto rounddown;
		if ((gr & 1) || fp->fp_sticky || (m3 & 1))
			break;
		goto rounddown;

	case FSR_RD_RZ:
		/* Round towards zero, i.e., down. */
		goto rounddown;

	case FSR_RD_RM:
		/* Round towards -Inf: up if negative, down if positive. */
		if (fp->fp_sign)
			break;
		goto rounddown;

	case FSR_RD_RP:
		/* Round towards +Inf: up if positive, down otherwise. */
		if (!fp->fp_sign)
			break;
		goto rounddown;
	}

	/* Bump low bit of mantissa, with carry. */
	FPU_ADDS(m3, m3, 1);
	FPU_ADDCS(m2, m2, 0);
	FPU_ADDCS(m1, m1, 0);
	FPU_ADDC(m0, m0, 0);
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (1);

rounddown:
	fp->fp_mant[0] = m0;
	fp->fp_mant[1] = m1;
	fp->fp_mant[2] = m2;
	fp->fp_mant[3] = m3;
	return (0);
}

/*
 * For overflow: return true if overflow is to go to +/-Inf, according
 * to the sign of the overflowing result.  If false, overflow is to go
 * to the largest magnitude value instead.
 */
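/*
 * N.B.: callers pass either fp_sign itself or a word with the sign bit
 * already shifted into bit 31; only zero vs. nonzero matters here.
 */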
static int
toinf(struct fpemu *fe, int sign)
{
	int inf;

	/* look at rounding direction */
	switch ((fe->fe_fsr >> FSR_RD_SHIFT) & FSR_RD_MASK) {

	default:
	case FSR_RD_RN:		/* the nearest value is always Inf */
		inf = 1;
		break;

	case FSR_RD_RZ:		/* toward 0 => never towards Inf */
		inf = 0;
		break;

	case FSR_RD_RP:		/* toward +Inf iff positive */
		inf = sign == 0;
		break;

	case FSR_RD_RM:		/* toward -Inf iff negative */
		inf = sign;
		break;
	}
	return (inf);
}

/*
 * fpn -> int (int value returned as return value).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftoi(struct fpemu *fe, struct fpn *fp)
{
	register u_int i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 32 (value >= 2^32), overflow.  Otherwise shift
		 * value right into last mantissa word (this will not exceed
		 * 0xffffffff), shifting any guard and round bits out into
		 * the sticky bit.  Then ``round'' towards zero, i.e., just
		 * set an inexact exception if sticky is set (see round()).
		 * If the result is > 0x80000000, or is positive and equals
		 * 0x80000000, overflow; otherwise the last fraction word
		 * is the result.
		 */
		if ((exp = fp->fp_exp) >= 32)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = fp->fp_mant[3];
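		/*
		 * Positive results may go up to 0x7fffffff; negative ones
		 * up to 0x80000000 (-2^31).  Adding the sign bit to the
		 * limit folds both range checks into a single compare.
		 */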
		if (i >= ((u_int)0x80000000 + sign))
			break;
		return (sign ? -i : i);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	return (0x7fffffff + sign);
}

#ifdef SUN4U
/*
 * fpn -> 64-bit int (high 32 bits of the value returned as the return
 * value; low 32 bits left in res[1]).
 *
 * N.B.: this conversion always rounds towards zero (this is a peculiarity
 * of the SPARC instruction set).
 */
u_int
fpu_ftox(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	register uint64_t i;
	register int sign, exp;

	sign = fp->fp_sign;
	switch (fp->fp_class) {

	case FPC_ZERO:
		res[1] = 0;
		return (0);

	case FPC_NUM:
		/*
		 * If exp >= 64 (value >= 2^64), overflow.  Otherwise shift
		 * the value right into the last two mantissa words (it will
		 * not exceed 0xffffffffffffffff), shifting any guard and
		 * round bits out into the sticky bit.  Then ``round''
		 * towards zero, i.e., just set an inexact exception if
		 * sticky is set (see round()).  If the result is
		 * > 0x8000000000000000, or is positive and equals
		 * 0x8000000000000000, overflow; otherwise the low-order
		 * 64 bits of the fraction are the result.
		 */
		if ((exp = fp->fp_exp) >= 64)
			break;
		/* NB: the following includes exp < 0 cases */
		if (fpu_shr(fp, FP_NMANT - 1 - exp) != 0)
			fe->fe_cx |= FSR_NX;
		i = ((uint64_t)fp->fp_mant[2] << 32) | fp->fp_mant[3];
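		/*
		 * As in fpu_ftoi: positive results may go up to
		 * 0x7fffffffffffffff, negative ones to 0x8000000000000000;
		 * adding the sign to the limit folds both into one compare.
		 */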
		if (i >= ((uint64_t)0x8000000000000000LL + sign))
			break;
		if (sign)
			i = -i;
		res[1] = (u_int)i;
		return (i >> 32);

	default:		/* Inf, qNaN, sNaN */
		break;
	}
	/* overflow: replace any inexact exception with invalid */
	fe->fe_cx = (fe->fe_cx & ~FSR_NX) | FSR_NV;
	res[1] = 0xffffffff + sign;	/* low word of the 64-bit limit */
	return (0x7fffffff + sign);	/* high word of the 64-bit limit */
}
#endif /* SUN4U */

/*
 * fpn -> single (32 bit single returned as return value).
 * We assume <= 29 bits in a single-precision fraction (1.f part).
 */
u_int
fpu_ftos(struct fpemu *fe, struct fpn *fp)
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

#define	SNG_EXP(e)	((e) << SNG_FRACBITS)	/* makes e an exponent */
#define	SNG_MASK	(SNG_EXP(1) - 1)	/* mask for fraction */

	/* Take care of non-numbers first. */
	if (ISNAN(fp)) {
		/*
		 * Preserve upper bits of NaN, per SPARC V8 appendix N.
		 * Note that fp->fp_mant[0] has the quiet bit set,
		 * even if it is classified as a signalling NaN.
		 */
		(void) fpu_shr(fp, FP_NMANT - 1 - SNG_FRACBITS);
		exp = SNG_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp))
		return (sign | SNG_EXP(SNG_EXP_INFNAN));
	if (ISZERO(fp))
		return (sign);

	/*
	 * Normals (including subnormals).  Drop all the fraction bits
	 * (including the explicit ``implied'' 1 bit) down into the
	 * single-precision range.  If the number is subnormal, move
	 * the ``implied'' 1 into the explicit range as well, and shift
	 * right to introduce leading zeroes.  Rounding then acts
	 * differently for normals and subnormals: the largest subnormal
	 * may round to the smallest normal (1.0 x 2^minexp), or may
	 * remain subnormal.  In the latter case, signal an underflow
	 * if the result was inexact or if underflow traps are enabled.
	 *
	 * Rounding a normal, on the other hand, always produces another
	 * normal (although either way the result might be too big for
	 * single precision, and cause an overflow).  If rounding a
	 * normal produces 2.0 in the fraction, we need not adjust that
	 * fraction at all, since both 1.0 and 2.0 are zero under the
	 * fraction mask.
	 *
	 * Note that the guard and round bits vanish from the number after
	 * rounding.
	 */
	if ((exp = fp->fp_exp + SNG_EXP_BIAS) <= 0) {	/* subnormal */
		/* -NG for g,r; -SNG_FRACBITS-exp for fraction */
		(void) fpu_shr(fp, FP_NMANT - FP_NG - SNG_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(1))
			return (sign | SNG_EXP(1) | 0);
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		return (sign | SNG_EXP(0) | fp->fp_mant[3]);
	}
	/* -FP_NG for g,r; -1 for implied 1; -SNG_FRACBITS for fraction */
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - SNG_FRACBITS);
#ifdef DIAGNOSTIC
	if ((fp->fp_mant[3] & SNG_EXP(1 << FP_NG)) == 0)
		panic("fpu_ftos");
#endif
	if (round(fe, fp) && fp->fp_mant[3] == SNG_EXP(2))
		exp++;
	if (exp >= SNG_EXP_INFNAN) {
		/* overflow to inf or to max single */
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign))
			return (sign | SNG_EXP(SNG_EXP_INFNAN));
		return (sign | SNG_EXP(SNG_EXP_INFNAN - 1) | SNG_MASK);
	}
done:
	/* phew, made it */
	return (sign | SNG_EXP(exp) | (fp->fp_mant[3] & SNG_MASK));
}

/*
 * fpn -> double (32 bit high-order result returned; 32-bit low order result
 * left in res[1]).  Assumes <= 61 bits in double precision fraction.
 *
 * This code mimics fpu_ftos; see it for comments.
 */
u_int
fpu_ftod(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

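/*
 * DBL_FRACBITS is 52, so the `& 31' below reduces the shift to the 20
 * fraction bits that occupy the high-order word of a double.
 */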
#define	DBL_EXP(e)	((e) << (DBL_FRACBITS & 31))
#define	DBL_MASK	(DBL_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, FP_NMANT - 1 - DBL_FRACBITS);
		exp = DBL_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= DBL_EXP(DBL_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + DBL_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - DBL_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(1)) {
			res[1] = 0;
			return (sign | DBL_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	(void) fpu_shr(fp, FP_NMANT - FP_NG - 1 - DBL_FRACBITS);
	if (round(fe, fp) && fp->fp_mant[2] == DBL_EXP(2))
		exp++;
	if (exp >= DBL_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = 0;
			return (sign | DBL_EXP(DBL_EXP_INFNAN) | 0);
		}
		res[1] = ~0;
		return (sign | DBL_EXP(DBL_EXP_INFNAN) | DBL_MASK);
	}
done:
	res[1] = fp->fp_mant[3];
	return (sign | DBL_EXP(exp) | (fp->fp_mant[2] & DBL_MASK));
}

/*
 * fpn -> extended (32 bit high-order result returned; low-order fraction
 * words left in res[1]..res[3]).  Like ftod, which is like ftos ... but
 * our internal format *is* extended precision, plus 2 bits for guard/round,
 * so we can avoid a small bit of work.
 */
u_int
fpu_ftoq(struct fpemu *fe, struct fpn *fp, u_int *res)
{
	register u_int sign = fp->fp_sign << 31;
	register int exp;

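/*
 * EXT_FRACBITS is 112; the `& 31' reduces the shift to the 16 fraction
 * bits held in the high-order word of an extended-precision number.
 */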
#define	EXT_EXP(e)	((e) << (EXT_FRACBITS & 31))
#define	EXT_MASK	(EXT_EXP(1) - 1)

	if (ISNAN(fp)) {
		(void) fpu_shr(fp, 2);	/* since we are not rounding */
		exp = EXT_EXP_INFNAN;
		goto done;
	}
	if (ISINF(fp)) {
		sign |= EXT_EXP(EXT_EXP_INFNAN);
		goto zero;
	}
	if (ISZERO(fp)) {
zero:		res[1] = res[2] = res[3] = 0;
		return (sign);
	}

	if ((exp = fp->fp_exp + EXT_EXP_BIAS) <= 0) {
		(void) fpu_shr(fp, FP_NMANT - FP_NG - EXT_FRACBITS - exp);
		if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(1)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(1) | 0);
		}
		if ((fe->fe_cx & FSR_NX) ||
		    (fe->fe_fsr & (FSR_UF << FSR_TEM_SHIFT)))
			fe->fe_cx |= FSR_UF;
		exp = 0;
		goto done;
	}
	/* Since internal == extended, no need to shift here. */
	if (round(fe, fp) && fp->fp_mant[0] == EXT_EXP(2))
		exp++;
	if (exp >= EXT_EXP_INFNAN) {
		fe->fe_cx |= FSR_OF | FSR_NX;
		if (toinf(fe, sign)) {
			res[1] = res[2] = res[3] = 0;
			return (sign | EXT_EXP(EXT_EXP_INFNAN) | 0);
		}
		res[1] = res[2] = res[3] = ~0;
		return (sign | EXT_EXP(EXT_EXP_INFNAN) | EXT_MASK);
	}
done:
	res[1] = fp->fp_mant[1];
	res[2] = fp->fp_mant[2];
	res[3] = fp->fp_mant[3];
	return (sign | EXT_EXP(exp) | (fp->fp_mant[0] & EXT_MASK));
}

/*
 * Implode an fpn, writing the result into the given space.
 */
void
fpu_implode(struct fpemu *fe, struct fpn *fp, int type, u_int *space)
{

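	/*
	 * space[] receives the packed result, most significant word
	 * first; the multi-word conversions (fpu_ftox, fpu_ftod,
	 * fpu_ftoq) fill the low-order words through their res pointer.
	 */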
	DPRINTF(FPE_REG, ("\n imploding: "));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));

	switch (type) {

#ifdef SUN4U
	case FTYPE_LNG:
		space[0] = fpu_ftox(fe, fp, space);
		break;
#endif /* SUN4U */

	case FTYPE_INT:
		space[0] = fpu_ftoi(fe, fp);
		break;

	case FTYPE_SNG:
		space[0] = fpu_ftos(fe, fp);
		break;

	case FTYPE_DBL:
		space[0] = fpu_ftod(fe, fp, space);
		break;

	case FTYPE_EXT:
		/* funky rounding precision options ?? */
		space[0] = fpu_ftoq(fe, fp, space);
		break;

	default:
		panic("fpu_implode");
	}
#ifdef SUN4U
	DPRINTF(FPE_REG, ("fpu_implode: %x %x %x %x\n",
		space[0], space[1], space[2], space[3]));
#else
	DPRINTF(FPE_REG, ("fpu_implode: %x %x\n",
		space[0], space[1]));
#endif
}
