/* fpu_explode.c revision 91174 */
1/* 2 * Copyright (c) 1992, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This software was developed by the Computer Systems Engineering group 6 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 7 * contributed to Berkeley. 8 * 9 * All advertising materials mentioning features or use of this software 10 * must display the following acknowledgement: 11 * This product includes software developed by the University of 12 * California, Lawrence Berkeley Laboratory. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. All advertising materials mentioning features or use of this software 23 * must display the following acknowledgement: 24 * This product includes software developed by the University of 25 * California, Berkeley and its contributors. 26 * 4. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * @(#)fpu_explode.c 8.1 (Berkeley) 6/11/93 43 * from: NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp 44 * 45 * $FreeBSD: head/lib/libc/sparc64/fpu/fpu_explode.c 91174 2002-02-23 21:37:18Z tmm $ 46 */ 47 48/* 49 * FPU subroutines: `explode' the machine's `packed binary' format numbers 50 * into our internal format. 51 */ 52 53#include <sys/param.h> 54 55#include <machine/frame.h> 56#include <machine/fp.h> 57#include <machine/fsr.h> 58#include <machine/ieee.h> 59#include <machine/instr.h> 60 61#include "fpu_arith.h" 62#include "fpu_emu.h" 63#include "fpu_extern.h" 64 65/* 66 * N.B.: in all of the following, we assume the FP format is 67 * 68 * --------------------------- 69 * | s | exponent | fraction | 70 * --------------------------- 71 * 72 * (which represents -1**s * 1.fraction * 2**exponent), so that the 73 * sign bit is way at the top (bit 31), the exponent is next, and 74 * then the remaining bits mark the fraction. A zero exponent means 75 * zero or denormalized (0.fraction rather than 1.fraction), and the 76 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN. 77 * 78 * Since the sign bit is always the topmost bit---this holds even for 79 * integers---we set that outside all the *tof functions. Each function 80 * returns the class code for the new number (but note that we use 81 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate). 82 */ 83 84/* 85 * int -> fpn. 
86 */ 87int 88__fpu_itof(fp, i) 89 register struct fpn *fp; 90 register u_int i; 91{ 92 93 if (i == 0) 94 return (FPC_ZERO); 95 /* 96 * The value FP_1 represents 2^FP_LG, so set the exponent 97 * there and let normalization fix it up. Convert negative 98 * numbers to sign-and-magnitude. Note that this relies on 99 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c. 100 */ 101 fp->fp_exp = FP_LG; 102 fp->fp_mant[0] = (int)i < 0 ? -i : i; 103 fp->fp_mant[1] = 0; 104 fp->fp_mant[2] = 0; 105 fp->fp_mant[3] = 0; 106 __fpu_norm(fp); 107 return (FPC_NUM); 108} 109 110/* 111 * 64-bit int -> fpn. 112 */ 113int 114__fpu_xtof(fp, i) 115 register struct fpn *fp; 116 register u_int64_t i; 117{ 118 119 if (i == 0) 120 return (FPC_ZERO); 121 /* 122 * The value FP_1 represents 2^FP_LG, so set the exponent 123 * there and let normalization fix it up. Convert negative 124 * numbers to sign-and-magnitude. Note that this relies on 125 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c. 126 */ 127 fp->fp_exp = FP_LG2; 128 *((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i; 129 fp->fp_mant[2] = 0; 130 fp->fp_mant[3] = 0; 131 __fpu_norm(fp); 132 return (FPC_NUM); 133} 134 135#define mask(nbits) ((1L << (nbits)) - 1) 136 137/* 138 * All external floating formats convert to internal in the same manner, 139 * as defined here. Note that only normals get an implied 1.0 inserted. 
 */
/*
 * Common unpacking skeleton, expanded by the *tof routines below.
 * There are three cases, keyed off the biased exponent field:
 *   exp == 0           -> zero (all fraction bits clear) or denormal
 *                         (no implied 1.0; set the true exponent of a
 *                         0.fraction number and let __fpu_norm() fix up);
 *   exp == max         -> infinity (fraction clear) or NaN (fraction
 *                         set; reported as FPC_QNAN here — fpu_explode
 *                         reclassifies signalling NaNs afterwards);
 *   otherwise          -> normal number: unbias the exponent and OR in
 *                         the implied leading 1 (FP_1) above f0.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)

/*
 * 32-bit single precision -> fpn.
 * We assume a single occupies at most (64-FP_LG) bits in the internal
 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
 */
int
__fpu_stof(fp, i)
	register struct fpn *fp;
	register u_int i;
{
	register int exp;
	register u_int frac, f0, f1;
/* Right-shift aligning the packed fraction with FP_LG bits above fp_mant[0]. */
#define SNG_SHIFT	(SNG_FRACBITS - FP_LG)

	/* Extract the biased exponent and fraction fields. */
	exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
	frac = i & mask(SNG_FRACBITS);
	/* Split the fraction across two internal mantissa words. */
	f0 = frac >> SNG_SHIFT;
	f1 = frac << (32 - SNG_SHIFT);
	FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
}

/*
 * 64-bit double -> fpn.
 * We assume this uses at most (96-FP_LG) bits.
 */
int
__fpu_dtof(fp, i, j)
	register struct fpn *fp;
	register u_int i, j;	/* i = high word, j = low word */
{
	register int exp;
	register u_int frac, f0, f1, f2;
#define DBL_SHIFT	(DBL_FRACBITS - 32 - FP_LG)

	/* Biased exponent and the high part of the fraction live in i. */
	exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
	frac = i & mask(DBL_FRACBITS - 32);
	/* Distribute the 52 fraction bits across three mantissa words. */
	f0 = frac >> DBL_SHIFT;
	f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
	f2 = j << (32 - DBL_SHIFT);
	/* frac only needs to be zero/nonzero for FP_TOF's classification. */
	frac |= j;
	FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
}

/*
 * 128-bit extended -> fpn.
 */
int
__fpu_qtof(fp, i, j, k, l)
	register struct fpn *fp;
	register u_int i, j, k, l;	/* i = most significant word ... l = least */
{
	register int exp;
	register u_int frac, f0, f1, f2, f3;
#define EXT_SHIFT	(-(EXT_FRACBITS - 3 * 32 - FP_LG))	/* left shift! */

	/*
	 * Note that ext and fpn `line up', hence no shifting needed.
	 */
	exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
	frac = i & mask(EXT_FRACBITS - 3 * 32);
	/* Shift the 112-bit fraction left across the four mantissa words. */
	f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
	f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
	f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
	f3 = l << EXT_SHIFT;
	/* frac only needs to be zero/nonzero for FP_TOF's classification. */
	frac |= j | k | l;
	FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
}

/*
 * Explode the contents of a register / regpair / regquad.
 * If the input is a signalling NaN, an NV (invalid) exception
 * will be set. (Note that nothing but NV can occur until ALU
 * operations are performed.)
 */
void
__fpu_explode(fe, fp, type, reg)
	struct fpemu *fe;
	struct fpn *fp;
	int type, reg;
{
	u_int s;
	u_int64_t l;

	/*
	 * Fetch both the even-aligned 64-bit view and the single 32-bit
	 * register up front; each FTYPE_* case below uses whichever it
	 * needs, and the remaining words for DBL/EXT are read per case.
	 */
	l = __fpu_getreg64(reg & ~1);
	s = __fpu_getreg(reg);
	/* The sign bit is in the same place for every format, including ints. */
	fp->fp_sign = s >> 31;
	fp->fp_sticky = 0;
	switch (type) {
	case FTYPE_LNG:
		s = __fpu_xtof(fp, l);
		break;

	case FTYPE_INT:
		s = __fpu_itof(fp, s);
		break;

	case FTYPE_SNG:
		s = __fpu_stof(fp, s);
		break;

	case FTYPE_DBL:
		s = __fpu_dtof(fp, s, __fpu_getreg(reg + 1));
		break;

	case FTYPE_EXT:
		s = __fpu_qtof(fp, s, __fpu_getreg(reg + 1),
		    __fpu_getreg(reg + 2),
		    __fpu_getreg(reg + 3));
		break;

	default:
		__fpu_panic("fpu_explode");
	}

	/* A NaN with the quiet bit clear is a signalling NaN. */
	if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
		/*
		 * Input is a signalling NaN.  All operations that return
		 * an input NaN operand put it through a ``NaN conversion'',
		 * which basically just means ``turn on the quiet bit''.
		 * We do this here so that all NaNs internally look quiet
		 * (we can tell signalling ones by their class).
		 */
		fp->fp_mant[0] |= FP_QUIETBIT;
		fe->fe_cx = FSR_NV;	/* assert invalid operand */
		s = FPC_SNAN;
	}
	fp->fp_class = s;
	DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
		((type == FTYPE_INT) ? 'i' :
		((type == FTYPE_SNG) ? 's' :
		((type == FTYPE_DBL) ? 'd' :
		((type == FTYPE_EXT) ? 'q' : '?')))),
		reg));
	DUMPFPN(FPE_REG, fp);
	DPRINTF(FPE_REG, ("\n"));
}