Deleted Added
full compact
fpu_explode.c (95587) fpu_explode.c (96422)
1/*
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This software was developed by the Computer Systems Engineering group
6 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
7 * contributed to Berkeley.
8 *
9 * All advertising materials mentioning features or use of this software
10 * must display the following acknowledgement:
11 * This product includes software developed by the University of
12 * California, Lawrence Berkeley Laboratory.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the University of
25 * California, Berkeley and its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * @(#)fpu_explode.c 8.1 (Berkeley) 6/11/93
43 * $NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
44 */
45
46#include <sys/cdefs.h>
1/*
2 * Copyright (c) 1992, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This software was developed by the Computer Systems Engineering group
6 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
7 * contributed to Berkeley.
8 *
9 * All advertising materials mentioning features or use of this software
10 * must display the following acknowledgement:
11 * This product includes software developed by the University of
12 * California, Lawrence Berkeley Laboratory.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 * 3. All advertising materials mentioning features or use of this software
23 * must display the following acknowledgement:
24 * This product includes software developed by the University of
25 * California, Berkeley and its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 * may be used to endorse or promote products derived from this software
28 * without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 * @(#)fpu_explode.c 8.1 (Berkeley) 6/11/93
43 * $NetBSD: fpu_explode.c,v 1.5 2000/08/03 18:32:08 eeh Exp $
44 */
45
46#include <sys/cdefs.h>
47__FBSDID("$FreeBSD: head/lib/libc/sparc64/fpu/fpu_explode.c 95587 2002-04-27 21:56:28Z jake $");
47__FBSDID("$FreeBSD: head/lib/libc/sparc64/fpu/fpu_explode.c 96422 2002-05-11 21:20:05Z jake $");
48
49/*
50 * FPU subroutines: `explode' the machine's `packed binary' format numbers
51 * into our internal format.
52 */
53
54#include <sys/param.h>
55
56#include <machine/frame.h>
57#include <machine/fp.h>
58#include <machine/fsr.h>
59#include <machine/ieee.h>
60#include <machine/instr.h>
61
62#include "fpu_arith.h"
63#include "fpu_emu.h"
64#include "fpu_extern.h"
65#include "__sparc_utrap_private.h"
66
67/*
68 * N.B.: in all of the following, we assume the FP format is
69 *
70 * ---------------------------
71 * | s | exponent | fraction |
72 * ---------------------------
73 *
74 * (which represents -1**s * 1.fraction * 2**exponent), so that the
75 * sign bit is way at the top (bit 31), the exponent is next, and
76 * then the remaining bits mark the fraction. A zero exponent means
77 * zero or denormalized (0.fraction rather than 1.fraction), and the
78 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
79 *
80 * Since the sign bit is always the topmost bit---this holds even for
81 * integers---we set that outside all the *tof functions. Each function
82 * returns the class code for the new number (but note that we use
83 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
84 */
85
86/*
87 * int -> fpn.
88 */
89int
90__fpu_itof(fp, i)
91 struct fpn *fp;
92 u_int i;
93{
94
95 if (i == 0)
96 return (FPC_ZERO);
97 /*
98 * The value FP_1 represents 2^FP_LG, so set the exponent
99 * there and let normalization fix it up. Convert negative
100 * numbers to sign-and-magnitude. Note that this relies on
101 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
102 */
103 fp->fp_exp = FP_LG;
104 fp->fp_mant[0] = (int)i < 0 ? -i : i;
105 fp->fp_mant[1] = 0;
106 fp->fp_mant[2] = 0;
107 fp->fp_mant[3] = 0;
108 __fpu_norm(fp);
109 return (FPC_NUM);
110}
111
112/*
113 * 64-bit int -> fpn.
114 */
115int
116__fpu_xtof(fp, i)
117 struct fpn *fp;
118 u_int64_t i;
119{
120
121 if (i == 0)
122 return (FPC_ZERO);
123 /*
124 * The value FP_1 represents 2^FP_LG, so set the exponent
125 * there and let normalization fix it up. Convert negative
126 * numbers to sign-and-magnitude. Note that this relies on
127 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
128 */
129 fp->fp_exp = FP_LG2;
130 *((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i;
131 fp->fp_mant[2] = 0;
132 fp->fp_mant[3] = 0;
133 __fpu_norm(fp);
134 return (FPC_NUM);
135}
136
/* Mask of the low `nbits' bits (evaluated in long arithmetic). */
#define	mask(nbits)	((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 *
 * Classification follows the packed-format exponent:
 *   exp == 0            -> zero (allfrac == 0) or denormal (normalize it);
 *   exp == max (2b+1)   -> infinity (allfrac == 0) or NaN;
 *   otherwise           -> normal number, insert the implied FP_1 bit.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
170
171/*
172 * 32-bit single precision -> fpn.
173 * We assume a single occupies at most (64-FP_LG) bits in the internal
174 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
175 */
176int
177__fpu_stof(fp, i)
178 struct fpn *fp;
179 u_int i;
180{
181 int exp;
182 u_int frac, f0, f1;
183#define SNG_SHIFT (SNG_FRACBITS - FP_LG)
184
185 exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
186 frac = i & mask(SNG_FRACBITS);
187 f0 = frac >> SNG_SHIFT;
188 f1 = frac << (32 - SNG_SHIFT);
189 FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
190}
191
192/*
193 * 64-bit double -> fpn.
194 * We assume this uses at most (96-FP_LG) bits.
195 */
196int
197__fpu_dtof(fp, i, j)
198 struct fpn *fp;
199 u_int i, j;
200{
201 int exp;
202 u_int frac, f0, f1, f2;
203#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)
204
205 exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
206 frac = i & mask(DBL_FRACBITS - 32);
207 f0 = frac >> DBL_SHIFT;
208 f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
209 f2 = j << (32 - DBL_SHIFT);
210 frac |= j;
211 FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
212}
213
214/*
215 * 128-bit extended -> fpn.
216 */
217int
218__fpu_qtof(fp, i, j, k, l)
219 struct fpn *fp;
220 u_int i, j, k, l;
221{
222 int exp;
223 u_int frac, f0, f1, f2, f3;
224#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG)) /* left shift! */
225
226 /*
227 * Note that ext and fpn `line up', hence no shifting needed.
228 */
229 exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
230 frac = i & mask(EXT_FRACBITS - 3 * 32);
231 f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
232 f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
233 f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
234 f3 = l << EXT_SHIFT;
235 frac |= j | k | l;
236 FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
237}
238
239/*
240 * Explode the contents of a / regpair / regquad.
241 * If the input is a signalling NaN, an NV (invalid) exception
242 * will be set. (Note that nothing but NV can occur until ALU
243 * operations are performed.)
244 */
245void
246__fpu_explode(fe, fp, type, reg)
247 struct fpemu *fe;
248 struct fpn *fp;
249 int type, reg;
250{
48
49/*
50 * FPU subroutines: `explode' the machine's `packed binary' format numbers
51 * into our internal format.
52 */
53
54#include <sys/param.h>
55
56#include <machine/frame.h>
57#include <machine/fp.h>
58#include <machine/fsr.h>
59#include <machine/ieee.h>
60#include <machine/instr.h>
61
62#include "fpu_arith.h"
63#include "fpu_emu.h"
64#include "fpu_extern.h"
65#include "__sparc_utrap_private.h"
66
67/*
68 * N.B.: in all of the following, we assume the FP format is
69 *
70 * ---------------------------
71 * | s | exponent | fraction |
72 * ---------------------------
73 *
74 * (which represents -1**s * 1.fraction * 2**exponent), so that the
75 * sign bit is way at the top (bit 31), the exponent is next, and
76 * then the remaining bits mark the fraction. A zero exponent means
77 * zero or denormalized (0.fraction rather than 1.fraction), and the
78 * maximum possible exponent, 2bias+1, signals inf (fraction==0) or NaN.
79 *
80 * Since the sign bit is always the topmost bit---this holds even for
81 * integers---we set that outside all the *tof functions. Each function
82 * returns the class code for the new number (but note that we use
83 * FPC_QNAN for all NaNs; fpu_explode will fix this if appropriate).
84 */
85
86/*
87 * int -> fpn.
88 */
89int
90__fpu_itof(fp, i)
91 struct fpn *fp;
92 u_int i;
93{
94
95 if (i == 0)
96 return (FPC_ZERO);
97 /*
98 * The value FP_1 represents 2^FP_LG, so set the exponent
99 * there and let normalization fix it up. Convert negative
100 * numbers to sign-and-magnitude. Note that this relies on
101 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
102 */
103 fp->fp_exp = FP_LG;
104 fp->fp_mant[0] = (int)i < 0 ? -i : i;
105 fp->fp_mant[1] = 0;
106 fp->fp_mant[2] = 0;
107 fp->fp_mant[3] = 0;
108 __fpu_norm(fp);
109 return (FPC_NUM);
110}
111
112/*
113 * 64-bit int -> fpn.
114 */
115int
116__fpu_xtof(fp, i)
117 struct fpn *fp;
118 u_int64_t i;
119{
120
121 if (i == 0)
122 return (FPC_ZERO);
123 /*
124 * The value FP_1 represents 2^FP_LG, so set the exponent
125 * there and let normalization fix it up. Convert negative
126 * numbers to sign-and-magnitude. Note that this relies on
127 * fpu_norm()'s handling of `supernormals'; see fpu_subr.c.
128 */
129 fp->fp_exp = FP_LG2;
130 *((int64_t*)fp->fp_mant) = (int64_t)i < 0 ? -i : i;
131 fp->fp_mant[2] = 0;
132 fp->fp_mant[3] = 0;
133 __fpu_norm(fp);
134 return (FPC_NUM);
135}
136
/* Mask of the low `nbits' bits (evaluated in long arithmetic). */
#define	mask(nbits)	((1L << (nbits)) - 1)

/*
 * All external floating formats convert to internal in the same manner,
 * as defined here.  Note that only normals get an implied 1.0 inserted.
 *
 * Classification follows the packed-format exponent:
 *   exp == 0            -> zero (allfrac == 0) or denormal (normalize it);
 *   exp == max (2b+1)   -> infinity (allfrac == 0) or NaN;
 *   otherwise           -> normal number, insert the implied FP_1 bit.
 */
#define	FP_TOF(exp, expbias, allfrac, f0, f1, f2, f3) \
	if (exp == 0) { \
		if (allfrac == 0) \
			return (FPC_ZERO); \
		fp->fp_exp = 1 - expbias; \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		__fpu_norm(fp); \
		return (FPC_NUM); \
	} \
	if (exp == (2 * expbias + 1)) { \
		if (allfrac == 0) \
			return (FPC_INF); \
		fp->fp_mant[0] = f0; \
		fp->fp_mant[1] = f1; \
		fp->fp_mant[2] = f2; \
		fp->fp_mant[3] = f3; \
		return (FPC_QNAN); \
	} \
	fp->fp_exp = exp - expbias; \
	fp->fp_mant[0] = FP_1 | f0; \
	fp->fp_mant[1] = f1; \
	fp->fp_mant[2] = f2; \
	fp->fp_mant[3] = f3; \
	return (FPC_NUM)
170
171/*
172 * 32-bit single precision -> fpn.
173 * We assume a single occupies at most (64-FP_LG) bits in the internal
174 * format: i.e., needs at most fp_mant[0] and fp_mant[1].
175 */
176int
177__fpu_stof(fp, i)
178 struct fpn *fp;
179 u_int i;
180{
181 int exp;
182 u_int frac, f0, f1;
183#define SNG_SHIFT (SNG_FRACBITS - FP_LG)
184
185 exp = (i >> (32 - 1 - SNG_EXPBITS)) & mask(SNG_EXPBITS);
186 frac = i & mask(SNG_FRACBITS);
187 f0 = frac >> SNG_SHIFT;
188 f1 = frac << (32 - SNG_SHIFT);
189 FP_TOF(exp, SNG_EXP_BIAS, frac, f0, f1, 0, 0);
190}
191
192/*
193 * 64-bit double -> fpn.
194 * We assume this uses at most (96-FP_LG) bits.
195 */
196int
197__fpu_dtof(fp, i, j)
198 struct fpn *fp;
199 u_int i, j;
200{
201 int exp;
202 u_int frac, f0, f1, f2;
203#define DBL_SHIFT (DBL_FRACBITS - 32 - FP_LG)
204
205 exp = (i >> (32 - 1 - DBL_EXPBITS)) & mask(DBL_EXPBITS);
206 frac = i & mask(DBL_FRACBITS - 32);
207 f0 = frac >> DBL_SHIFT;
208 f1 = (frac << (32 - DBL_SHIFT)) | (j >> DBL_SHIFT);
209 f2 = j << (32 - DBL_SHIFT);
210 frac |= j;
211 FP_TOF(exp, DBL_EXP_BIAS, frac, f0, f1, f2, 0);
212}
213
214/*
215 * 128-bit extended -> fpn.
216 */
217int
218__fpu_qtof(fp, i, j, k, l)
219 struct fpn *fp;
220 u_int i, j, k, l;
221{
222 int exp;
223 u_int frac, f0, f1, f2, f3;
224#define EXT_SHIFT (-(EXT_FRACBITS - 3 * 32 - FP_LG)) /* left shift! */
225
226 /*
227 * Note that ext and fpn `line up', hence no shifting needed.
228 */
229 exp = (i >> (32 - 1 - EXT_EXPBITS)) & mask(EXT_EXPBITS);
230 frac = i & mask(EXT_FRACBITS - 3 * 32);
231 f0 = (frac << EXT_SHIFT) | (j >> (32 - EXT_SHIFT));
232 f1 = (j << EXT_SHIFT) | (k >> (32 - EXT_SHIFT));
233 f2 = (k << EXT_SHIFT) | (l >> (32 - EXT_SHIFT));
234 f3 = l << EXT_SHIFT;
235 frac |= j | k | l;
236 FP_TOF(exp, EXT_EXP_BIAS, frac, f0, f1, f2, f3);
237}
238
239/*
240 * Explode the contents of a / regpair / regquad.
241 * If the input is a signalling NaN, an NV (invalid) exception
242 * will be set. (Note that nothing but NV can occur until ALU
243 * operations are performed.)
244 */
245void
246__fpu_explode(fe, fp, type, reg)
247 struct fpemu *fe;
248 struct fpn *fp;
249 int type, reg;
250{
251 u_int s;
252 u_int64_t l;
251 u_int32_t s, *sp;
252 u_int64_t l[2];
253
253
254 l = __fpu_getreg64(reg & ~1);
255 s = __fpu_getreg(reg);
256 fp->fp_sign = s >> 31;
254 if (type == FTYPE_LNG || type == FTYPE_DBL || type == FTYPE_EXT) {
255 l[0] = __fpu_getreg64(reg & ~1);
256 sp = (u_int32_t *)l;
257 fp->fp_sign = sp[0] >> 31;
258 } else {
259 s = __fpu_getreg(reg);
260 fp->fp_sign = s >> 31;
261 }
257 fp->fp_sticky = 0;
258 switch (type) {
259 case FTYPE_LNG:
262 fp->fp_sticky = 0;
263 switch (type) {
264 case FTYPE_LNG:
260 s = __fpu_xtof(fp, l);
265 s = __fpu_xtof(fp, l[0]);
261 break;
262
263 case FTYPE_INT:
264 s = __fpu_itof(fp, s);
265 break;
266
267 case FTYPE_SNG:
268 s = __fpu_stof(fp, s);
269 break;
270
271 case FTYPE_DBL:
266 break;
267
268 case FTYPE_INT:
269 s = __fpu_itof(fp, s);
270 break;
271
272 case FTYPE_SNG:
273 s = __fpu_stof(fp, s);
274 break;
275
276 case FTYPE_DBL:
272 s = __fpu_dtof(fp, s, __fpu_getreg(reg + 1));
277 s = __fpu_dtof(fp, sp[0], sp[1]);
273 break;
274
275 case FTYPE_EXT:
278 break;
279
280 case FTYPE_EXT:
276 s = __fpu_qtof(fp, s, __fpu_getreg(reg + 1),
277 __fpu_getreg(reg + 2),
278 __fpu_getreg(reg + 3));
281 l[1] = __fpu_getreg64((reg & ~1) + 2);
282 s = __fpu_qtof(fp, sp[0], sp[1], sp[2], sp[3]);
279 break;
280
281 default:
282 __utrap_panic("fpu_explode");
283 }
284
285 if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
286 /*
287 * Input is a signalling NaN. All operations that return
288 * an input NaN operand put it through a ``NaN conversion'',
289 * which basically just means ``turn on the quiet bit''.
290 * We do this here so that all NaNs internally look quiet
291 * (we can tell signalling ones by their class).
292 */
293 fp->fp_mant[0] |= FP_QUIETBIT;
294 fe->fe_cx = FSR_NV; /* assert invalid operand */
295 s = FPC_SNAN;
296 }
297 fp->fp_class = s;
298 DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
299 ((type == FTYPE_INT) ? 'i' :
300 ((type == FTYPE_SNG) ? 's' :
301 ((type == FTYPE_DBL) ? 'd' :
302 ((type == FTYPE_EXT) ? 'q' : '?')))),
303 reg));
304 DUMPFPN(FPE_REG, fp);
305 DPRINTF(FPE_REG, ("\n"));
306}
283 break;
284
285 default:
286 __utrap_panic("fpu_explode");
287 }
288
289 if (s == FPC_QNAN && (fp->fp_mant[0] & FP_QUIETBIT) == 0) {
290 /*
291 * Input is a signalling NaN. All operations that return
292 * an input NaN operand put it through a ``NaN conversion'',
293 * which basically just means ``turn on the quiet bit''.
294 * We do this here so that all NaNs internally look quiet
295 * (we can tell signalling ones by their class).
296 */
297 fp->fp_mant[0] |= FP_QUIETBIT;
298 fe->fe_cx = FSR_NV; /* assert invalid operand */
299 s = FPC_SNAN;
300 }
301 fp->fp_class = s;
302 DPRINTF(FPE_REG, ("fpu_explode: %%%c%d => ", (type == FTYPE_LNG) ? 'x' :
303 ((type == FTYPE_INT) ? 'i' :
304 ((type == FTYPE_SNG) ? 's' :
305 ((type == FTYPE_DBL) ? 'd' :
306 ((type == FTYPE_EXT) ? 'q' : '?')))),
307 reg));
308 DUMPFPN(FPE_REG, fp);
309 DPRINTF(FPE_REG, ("\n"));
310}