/* __gmp_extract_double -- convert from double to array of mp_limb_t.

Copyright 1996, 1999-2002, 2006, 2012 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */

#include "gmp-impl.h"

/* XDEBUG forces the generic (non-IEEE) scaling loop below, so the
   fallback path can be exercised even on IEEE machines.  */
#ifdef XDEBUG
#undef _GMP_IEEE_FLOATS
#endif

#ifndef _GMP_IEEE_FLOATS
#define _GMP_IEEE_FLOATS 0
#endif

/* Extract a non-negative double in d.

   The mantissa of d is stored as LIMBS_PER_DOUBLE limbs at rp, most
   significant limb in rp[LIMBS_PER_DOUBLE-1], and the return value is
   an exponent counted in units of GMP_NUMB_BITS.

   NOTE(review): the intended invariant appears to be
   d == {rp, LIMBS_PER_DOUBLE} * 2^(GMP_NUMB_BITS * (retval - LIMBS_PER_DOUBLE)),
   i.e. the mantissa is left-justified within the limb array -- confirm
   against the callers (mpz/mpq/mpf set_d and cmp_d).

   d must be >= 0 (asserted).  d == 0 zeroes the limbs and returns 0.  */

int
__gmp_extract_double (mp_ptr rp, double d)
{
  long exp;
  unsigned sc;
#ifdef _LONG_LONG_LIMB
#define BITS_PER_PART 64	/* somewhat bogus */
  unsigned long long int manl;	/* whole mantissa held in one 64-bit part */
#else
#define BITS_PER_PART GMP_LIMB_BITS
  unsigned long int manh, manl;	/* mantissa split across two 32-bit parts */
#endif

  /* BUGS

     1. Should handle Inf and NaN in IEEE specific code.
     2. Handle Inf and NaN also in default code, to avoid hangs.
     3. Generalize to handle all GMP_LIMB_BITS >= 32.
     4. This lits is incomplete and misspelled.
   */

  ASSERT (d >= 0.0);

  if (d == 0.0)
    {
      MPN_ZERO (rp, LIMBS_PER_DOUBLE);
      return 0;
    }

#if _GMP_IEEE_FLOATS
  {
#if defined (__alpha) && __GNUC__ == 2 && __GNUC_MINOR__ == 8
    /* Work around alpha-specific bug in GCC 2.8.x.  */
    volatile
#endif
    union ieee_double_extract x;
    x.d = d;
    exp = x.s.exp;		/* biased IEEE exponent; bias removed below */
#if BITS_PER_PART == 64		/* generalize this to BITS_PER_PART > BITS_IN_MANTISSA */
    /* Reassemble the 52 mantissa bits left-justified in a 64-bit part,
       with the implicit leading 1 put back at the top bit.  */
    manl = (((mp_limb_t) 1 << 63)
	    | ((mp_limb_t) x.s.manh << 43) | ((mp_limb_t) x.s.manl << 11));
    if (exp == 0)
      {
	/* Denormalized number.  Don't try to be clever about this,
	   since it is not an important case to make fast.  */
	exp = 1;
	/* A denormal has no implicit leading 1, so the bit forced in
	   above is spurious; shift until the true top bit reaches the
	   high position, adjusting exp to match.  */
	do
	  {
	    manl = manl << 1;
	    exp--;
	  }
	while ((manl & GMP_LIMB_HIGHBIT) == 0);
      }
#endif
#if BITS_PER_PART == 32
    /* Same reassembly, but the 53-bit normalized mantissa is spread
       over two 32-bit parts: manh holds the top 32 bits, manl the rest.  */
    manh = ((mp_limb_t) 1 << 31) | (x.s.manh << 11) | (x.s.manl >> 21);
    manl = x.s.manl << 11;
    if (exp == 0)
      {
	/* Denormalized number.  Don't try to be clever about this,
	   since it is not an important case to make fast.  */
	exp = 1;
	do
	  {
	    manh = (manh << 1) | (manl >> 31);
	    manl = manl << 1;
	    exp--;
	  }
	while ((manh & GMP_LIMB_HIGHBIT) == 0);
      }
#endif
#if BITS_PER_PART != 32 && BITS_PER_PART != 64
    You need to generalize the code above to handle this.
#endif
    exp -= 1022;		/* Remove IEEE bias.  */
  }
#else
  {
    /* Unknown (or known to be non-IEEE) double format.  Scale d into
       [0.5, 1.0) by powers of two, counting the binary exponent in exp,
       then read the mantissa bits out via float-to-integer conversion.  */
    exp = 0;
    if (d >= 1.0)
      {
	/* Guard against d so large that halving doesn't change it
	   (Inf, or degenerate formats): would loop forever below.  */
	ASSERT_ALWAYS (d * 0.5 != d);

	/* Coarse steps of 2^16 first, then single-bit steps.  */
	while (d >= 32768.0)
	  {
	    d *= (1.0 / 65536.0);
	    exp += 16;
	  }
	while (d >= 1.0)
	  {
	    d *= 0.5;
	    exp += 1;
	  }
      }
    else if (d < 0.5)
      {
	while (d < (1.0 / 65536.0))
	  {
	    d *= 65536.0;
	    exp -= 16;
	  }
	while (d < 0.5)
	  {
	    d *= 2.0;
	    exp -= 1;
	  }
      }

    /* d is now in [0.5, 1.0); multiply by 2^BITS_PER_PART so the
       integer part is the top BITS_PER_PART mantissa bits.  */
    d *= (4.0 * ((unsigned long int) 1 << (BITS_PER_PART - 2)));
#if BITS_PER_PART == 64
    manl = d;
#endif
#if BITS_PER_PART == 32
    manh = d;
    /* Remaining fraction scaled up again yields the low part.  */
    manl = (d - manh) * (4.0 * ((unsigned long int) 1 << (BITS_PER_PART - 2)));
#endif
  }
#endif /* IEEE */

  /* sc = left-shift needed to align the mantissa's top bit within a
     limb; the large bias makes the operand non-negative, so % and /
     behave as floor-mod and floor-div even for negative exp.  */
  sc = (unsigned) (exp + 64 * GMP_NUMB_BITS) % GMP_NUMB_BITS;

  /* We add something here to get rounding right.  */
  exp = (exp + 64 * GMP_NUMB_BITS) / GMP_NUMB_BITS - 64 * GMP_NUMB_BITS / GMP_NUMB_BITS + 1;

  /* The remaining sections pack manh/manl into rp, shifted left by sc,
     one section per (BITS_PER_PART, LIMBS_PER_DOUBLE, nails) layout.
     When sc == 0 the mantissa fills one limb fewer, so exp is
     decremented instead of shifting.  */

#if BITS_PER_PART == 64 && LIMBS_PER_DOUBLE == 2
#if GMP_NAIL_BITS == 0
  if (sc != 0)
    {
      rp[1] = manl >> (GMP_LIMB_BITS - sc);
      rp[0] = manl << sc;
    }
  else
    {
      rp[1] = manl;
      rp[0] = 0;
      exp--;
    }
#else
  if (sc > GMP_NAIL_BITS)
    {
      rp[1] = manl >> (GMP_LIMB_BITS - sc);
      rp[0] = (manl << (sc - GMP_NAIL_BITS)) & GMP_NUMB_MASK;
    }
  else
    {
      if (sc == 0)
	{
	  rp[1] = manl >> GMP_NAIL_BITS;
	  rp[0] = (manl << GMP_NUMB_BITS - GMP_NAIL_BITS) & GMP_NUMB_MASK;
	  exp--;
	}
      else
	{
	  rp[1] = manl >> (GMP_LIMB_BITS - sc);
	  rp[0] = (manl >> (GMP_NAIL_BITS - sc)) & GMP_NUMB_MASK;
	}
    }
#endif
#endif

#if BITS_PER_PART == 64 && LIMBS_PER_DOUBLE == 3
  if (sc > GMP_NAIL_BITS)
    {
      rp[2] = manl >> (GMP_LIMB_BITS - sc);
      rp[1] = (manl << sc - GMP_NAIL_BITS) & GMP_NUMB_MASK;
      if (sc >= 2 * GMP_NAIL_BITS)
	rp[0] = 0;
      else
	rp[0] = (manl << GMP_NUMB_BITS - GMP_NAIL_BITS + sc) & GMP_NUMB_MASK;
    }
  else
    {
      if (sc == 0)
	{
	  rp[2] = manl >> GMP_NAIL_BITS;
	  rp[1] = (manl << GMP_NUMB_BITS - GMP_NAIL_BITS) & GMP_NUMB_MASK;
	  rp[0] = 0;
	  exp--;
	}
      else
	{
	  rp[2] = manl >> (GMP_LIMB_BITS - sc);
	  rp[1] = (manl >> GMP_NAIL_BITS - sc) & GMP_NUMB_MASK;
	  rp[0] = (manl << GMP_NUMB_BITS - GMP_NAIL_BITS + sc) & GMP_NUMB_MASK;
	}
    }
#endif

#if BITS_PER_PART == 32 && LIMBS_PER_DOUBLE == 3
#if GMP_NAIL_BITS == 0
  if (sc != 0)
    {
      rp[2] = manh >> (GMP_LIMB_BITS - sc);
      rp[1] = (manh << sc) | (manl >> (GMP_LIMB_BITS - sc));
      rp[0] = manl << sc;
    }
  else
    {
      rp[2] = manh;
      rp[1] = manl;
      rp[0] = 0;
      exp--;
    }
#else
  if (sc > GMP_NAIL_BITS)
    {
      rp[2] = (manh >> (GMP_LIMB_BITS - sc));
      rp[1] = ((manh << (sc - GMP_NAIL_BITS)) |
	       (manl >> (GMP_LIMB_BITS - sc + GMP_NAIL_BITS))) & GMP_NUMB_MASK;
      if (sc >= 2 * GMP_NAIL_BITS)
	rp[0] = (manl << sc - 2 * GMP_NAIL_BITS) & GMP_NUMB_MASK;
      else
	rp[0] = manl >> (2 * GMP_NAIL_BITS - sc) & GMP_NUMB_MASK;
    }
  else
    {
      if (sc == 0)
	{
	  rp[2] = manh >> GMP_NAIL_BITS;
	  rp[1] = ((manh << GMP_NUMB_BITS - GMP_NAIL_BITS) | (manl >> 2 * GMP_NAIL_BITS)) & GMP_NUMB_MASK;
	  rp[0] = (manl << GMP_NUMB_BITS - 2 * GMP_NAIL_BITS) & GMP_NUMB_MASK;
	  exp--;
	}
      else
	{
	  rp[2] = (manh >> (GMP_LIMB_BITS - sc));
	  rp[1] = (manh >> (GMP_NAIL_BITS - sc)) & GMP_NUMB_MASK;
	  rp[0] = ((manh << (GMP_NUMB_BITS - GMP_NAIL_BITS + sc))
		   | (manl >> (GMP_LIMB_BITS - (GMP_NUMB_BITS - GMP_NAIL_BITS + sc)))) & GMP_NUMB_MASK;
	}
    }
#endif
#endif

#if BITS_PER_PART == 32 && LIMBS_PER_DOUBLE > 3
  /* General small-numb-bits case: peel GMP_NUMB_BITS at a time off the
     top of the manh:manl double-part register into successive limbs.  */
  if (sc == 0)
    {
      int i;

      for (i = LIMBS_PER_DOUBLE - 1; i >= 0; i--)
	{
	  rp[i] = manh >> (BITS_PER_ULONG - GMP_NUMB_BITS);
	  manh = ((manh << GMP_NUMB_BITS)
		  | (manl >> (BITS_PER_ULONG - GMP_NUMB_BITS)));
	  manl = manl << GMP_NUMB_BITS;
	}
      exp--;
    }
  else
    {
      int i;

      rp[LIMBS_PER_DOUBLE - 1] = (manh >> (GMP_LIMB_BITS - sc));
      manh = (manh << sc) | (manl >> (GMP_LIMB_BITS - sc));
      manl = (manl << sc);
      for (i = LIMBS_PER_DOUBLE - 2; i >= 0; i--)
	{
	  rp[i] = manh >> (BITS_PER_ULONG - GMP_NUMB_BITS);
	  manh = ((manh << GMP_NUMB_BITS)
		  | (manl >> (BITS_PER_ULONG - GMP_NUMB_BITS)));
	  manl = manl << GMP_NUMB_BITS;
	}
    }
#endif

  return exp;
}