/* mpf_sub -- Subtract two floats.

Copyright 1993, 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, 2011 Free
Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"

/* Set R to U - V, truncated to R's precision.  R may alias U or V.

   Strategy: same-sign subtraction is done here on limb magnitudes; a
   mixed-sign subtraction is delegated to mpf_add on a stack copy of V
   with its sign flipped.  NEGATE records whether the computed magnitude
   must be stored with a negative size at the end; it is toggled each
   time the roles of U and V are swapped.

   The delicate part is heavy cancellation: when the exponents differ by
   at most one limb, the operands may share a long common prefix, so the
   code scans from the most significant limb downward, discarding equal
   (or 00000000/ffffffff) limb pairs before doing the actual mpn-level
   subtraction.  */

void
mpf_sub (mpf_ptr r, mpf_srcptr u, mpf_srcptr v)
{
  mp_srcptr up, vp;		/* limb pointers of the (possibly swapped) operands */
  mp_ptr rp, tp;		/* result limbs; temporary scratch limbs */
  mp_size_t usize, vsize, rsize;
  mp_size_t prec;		/* working precision in limbs (see below) */
  mp_exp_t exp;			/* result exponent, in limbs */
  mp_size_t ediff;		/* exponent difference u->_mp_exp - v->_mp_exp */
  int negate;
  TMP_DECL;

  usize = u->_mp_size;
  vsize = v->_mp_size;

  /* Handle special cases that don't work in generic code below.  */
  if (usize == 0)
    {
      /* 0 - v = -v */
      mpf_neg (r, v);
      return;
    }
  if (vsize == 0)
    {
      /* u - 0 = u */
      if (r != u)
	mpf_set (r, u);
      return;
    }

  /* If signs of U and V are different, perform addition.  (The sign of an
     mpf is carried by the sign of _mp_size, so the XOR of the sizes is
     negative exactly when the signs differ.)  */
  if ((usize ^ vsize) < 0)
    {
      /* Shallow stack copy of V with the sign flipped; the limb data is
	 shared, not copied.  */
      __mpf_struct v_negated;
      v_negated._mp_size = -vsize;
      v_negated._mp_exp = v->_mp_exp;
      v_negated._mp_d = v->_mp_d;
      mpf_add (r, u, &v_negated);
      return;
    }

  TMP_MARK;

  /* Signs are now known to be the same.  */
  negate = usize < 0;

  /* Make U be the operand with the largest exponent.  */
  if (u->_mp_exp < v->_mp_exp)
    {
      mpf_srcptr t;
      t = u; u = v; v = t;
      negate ^= 1;		/* we now compute V - U, so flip the sign */
      usize = u->_mp_size;
      vsize = v->_mp_size;
    }

  usize = ABS (usize);
  vsize = ABS (vsize);
  up = u->_mp_d;
  vp = v->_mp_d;
  rp = r->_mp_d;
  prec = r->_mp_prec + 1;	/* requested precision plus one guard limb */
  exp = u->_mp_exp;
  ediff = u->_mp_exp - v->_mp_exp;	/* >= 0 after the swap above */

  /* If ediff is 0 or 1, we might have a situation where the operands are
     extremely close.  We need to scan the operands from the most significant
     end and ignore the initial parts that are equal.  */
  if (ediff <= 1)
    {
      if (ediff == 0)
	{
	  /* Skip leading limbs in U and V that are equal.  Each discarded
	     limb pair lowers the result exponent by one.  */
	  if (up[usize - 1] == vp[vsize - 1])
	    {
	      /* This loop normally exits immediately.  Optimize for that.  */
	      do
		{
		  usize--;
		  vsize--;
		  exp--;

		  if (usize == 0)
		    {
		      /* u cancels high limbs of v, result is rest of v */
		      negate ^= 1;
		    cancellation:
		      /* strip high zeros before truncating to prec */
		      while (vsize != 0 && vp[vsize - 1] == 0)
			{
			  vsize--;
			  exp--;
			}
		      if (vsize > prec)
			{
			  vp += vsize - prec;
			  vsize = prec;
			}
		      /* INCR copy: rp may overlap vp from below when R
			 aliases an operand.  */
		      MPN_COPY_INCR (rp, vp, vsize);
		      rsize = vsize;
		      goto done;
		    }
		  if (vsize == 0)
		    {
		      /* Symmetric case: v cancels high limbs of u; reuse the
			 code above with the roles exchanged (no sign flip).  */
		      vp = up;
		      vsize = usize;
		      goto cancellation;
		    }
		}
	      while (up[usize - 1] == vp[vsize - 1]);
	    }

	  if (up[usize - 1] < vp[vsize - 1])
	    {
	      /* For simplicity, swap U and V.  Note that since the loop above
		 wouldn't have exited unless up[usize - 1] and vp[vsize - 1]
		 were non-equal, this if-statement catches all cases where U
		 is smaller than V.  */
	      MPN_SRCPTR_SWAP (up,usize, vp,vsize);
	      negate ^= 1;
	      /* negating ediff not necessary since it is 0.  */
	    }

	  /* Check for
	       x+1 00000000 ...
	       x   ffffffff ...
	     i.e. top limbs differing by exactly 1, which still allows deep
	     cancellation across the 0/GMP_NUMB_MAX run below.  Any larger
	     difference means no further cancellation is possible.  */
	  if (up[usize - 1] != vp[vsize - 1] + 1)
	    goto general_case;
	  usize--;
	  vsize--;
	  exp--;
	}
      else /* ediff == 1 */
	{
	  /* Check for
	       1 00000000 ...
	       0 ffffffff ...
	     the only way a one-limb exponent gap can still cancel.  */

	  if (up[usize - 1] != 1 || vp[vsize - 1] != GMP_NUMB_MAX
	      || (usize >= 2 && up[usize - 2] != 0))
	    goto general_case;

	  usize--;
	  exp--;
	}

      /* Skip sequences of 00000000/ffffffff -- each such limb pair
	 contributes exactly B-1 ... i.e. still differs by one unit in the
	 limb above, so cancellation continues.  */
      while (vsize != 0 && usize != 0 && up[usize - 1] == 0
	     && vp[vsize - 1] == GMP_NUMB_MAX)
	{
	  usize--;
	  vsize--;
	  exp--;
	}

      if (usize == 0)
	{
	  /* U exhausted; remaining ffffffff limbs of V keep cancelling
	     against the implicit leading 1.  */
	  while (vsize != 0 && vp[vsize - 1] == GMP_NUMB_MAX)
	    {
	      vsize--;
	      exp--;
	    }
	}

      /* Truncate both operands to prec-1 limbs; the implicit leading unit
	 (restored below) occupies the remaining limb of the scratch area.  */
      if (usize > prec - 1)
	{
	  up += usize - (prec - 1);
	  usize = prec - 1;
	}
      if (vsize > prec - 1)
	{
	  vp += vsize - (prec - 1);
	  vsize = prec - 1;
	}

      /* At this point the stripped high parts of U and V differ by exactly
	 one unit in the limb just above the remaining data, so
	 U - V = 1 * B^n + (u_low - v_low), with B = 2^GMP_NUMB_BITS and
	 u_low/v_low the remaining usize/vsize limbs.  The code below forms
	 u_low - v_low and re-appends the implicit leading 1 unless it is
	 consumed by a borrow (cy_limb != 0).  */
      tp = TMP_ALLOC_LIMBS (prec);
      {
	mp_limb_t cy_limb;
	if (vsize == 0)
	  {
	    /* v_low is empty: result is u_low with the leading 1 intact.  */
	    mp_size_t size, i;
	    size = usize;
	    for (i = 0; i < size; i++)
	      tp[i] = up[i];
	    tp[size] = 1;
	    rsize = size + 1;
	    exp++;		/* account for the restored leading limb */
	    goto normalize;
	  }
	if (usize == 0)
	  {
	    /* u_low is empty: result is B^n - v_low, i.e. the two's
	       complement of v_low (one's complement, then +1).  */
	    mp_size_t size, i;
	    size = vsize;
	    for (i = 0; i < size; i++)
	      tp[i] = ~vp[i] & GMP_NUMB_MASK;
	    cy_limb = 1 - mpn_add_1 (tp, tp, vsize, (mp_limb_t) 1);
	    rsize = vsize;
	    if (cy_limb == 0)
	      {
		tp[rsize] = 1;
		rsize++;
		exp++;
	      }
	    goto normalize;
	  }
	if (usize >= vsize)
	  {
	    /* uuuu */
	    /* vv   */
	    mp_size_t size;
	    size = usize - vsize;
	    MPN_COPY (tp, up, size);
	    cy_limb = mpn_sub_n (tp + size, up + size, vp, vsize);
	    rsize = usize;
	  }
	else /* (usize < vsize) */
	  {
	    /* uuuu    */
	    /* vvvvvvv */
	    /* Negate v's low excess limbs (two's complement spread across
	       the three mpn calls), then subtract limb-aligned parts;
	       cy_limb accumulates the net borrow out of the top.  */
	    mp_size_t size, i;
	    size = vsize - usize;
	    for (i = 0; i < size; i++)
	      tp[i] = ~vp[i] & GMP_NUMB_MASK;
	    cy_limb = mpn_sub_n (tp + size, up, vp + size, usize);
	    cy_limb+= mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
	    cy_limb-= mpn_add_1 (tp, tp, vsize, (mp_limb_t) 1);
	    rsize = vsize;
	  }
	if (cy_limb == 0)
	  {
	    /* No net borrow: the implicit leading 1 survives.  */
	    tp[rsize] = 1;
	    rsize++;
	    exp++;
	  }
	goto normalize;
      }
    }

general_case:
  /* If U extends beyond PREC, ignore the part that does.  */
  if (usize > prec)
    {
      up += usize - prec;
      usize = prec;
    }

  /* If V extends beyond PREC, ignore the part that does.
     Note that this may make vsize negative.  */
  if (vsize + ediff > prec)
    {
      vp += vsize + ediff - prec;
      vsize = prec - ediff;
    }

  if (ediff >= prec)
    {
      /* V completely cancelled.  */
      if (rp != up)
	MPN_COPY (rp, up, usize);
      rsize = usize;
    }
  else
    {
      /* Allocate temp space for the result.  Allocate
	 just vsize + ediff later???  */
      tp = TMP_ALLOC_LIMBS (prec);

      /* Locate the least significant non-zero limb in (the needed
	 parts of) U and V, to simplify the code below.  */
      for (;;)
	{
	  if (vsize == 0)
	    {
	      /* V's needed part is all zeros: result is just U.  */
	      MPN_COPY (rp, up, usize);
	      rsize = usize;
	      goto done;
	    }
	  if (vp[0] != 0)
	    break;
	  vp++, vsize--;
	}
      for (;;)
	{
	  if (usize == 0)
	    {
	      /* U's needed part is all zeros: result is -V.  */
	      MPN_COPY (rp, vp, vsize);
	      rsize = vsize;
	      negate ^= 1;
	      goto done;
	    }
	  if (up[0] != 0)
	    break;
	  up++, usize--;
	}

      /* Possible alignments of U and V from here on (U's exponent is the
	 larger; V sits ediff limbs below U's top):
	   uuuu    | uuuu  | uuuu  | uuuu | uuuu
	   vvvvvvv |   vv  | vvvvv |    v |      vv  */

      if (usize > ediff)
	{
	  /* U and V partially overlaps.  */
	  if (ediff == 0)
	    {
	      /* Have to compare the leading limbs of u and v
		 to determine whether to compute u - v or v - u.  */
	      if (usize >= vsize)
		{
		  /* uuuu */
		  /* vv   */
		  mp_size_t size;
		  size = usize - vsize;
		  MPN_COPY (tp, up, size);
		  mpn_sub_n (tp + size, up + size, vp, vsize);
		  rsize = usize;
		}
	      else /* (usize < vsize) */
		{
		  /* uuuu    */
		  /* vvvvvvv */
		  /* Two's-complement-negate v's low excess limbs (vp[0] is
		     known non-zero, so -vp[0] absorbs the +1 and the rest
		     take plain one's complement), then subtract with the
		     borrow carried into the overlap via mpn_sub_1.  */
		  mp_size_t size, i;
		  size = vsize - usize;
		  tp[0] = -vp[0] & GMP_NUMB_MASK;
		  for (i = 1; i < size; i++)
		    tp[i] = ~vp[i] & GMP_NUMB_MASK;
		  mpn_sub_n (tp + size, up, vp + size, usize);
		  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
		  rsize = vsize;
		}
	    }
	  else
	    {
	      if (vsize + ediff <= usize)
		{
		  /* uuuu */
		  /*  v   */
		  mp_size_t size;
		  size = usize - ediff - vsize;
		  MPN_COPY (tp, up, size);
		  mpn_sub (tp + size, up + size, usize - size, vp, vsize);
		  rsize = usize;
		}
	      else
		{
		  /* uuuu    */
		  /*   vvvvv */
		  /* Same negate-the-low-excess trick as above, for the
		     case where V extends below U.  */
		  mp_size_t size, i;
		  size = vsize + ediff - usize;
		  tp[0] = -vp[0] & GMP_NUMB_MASK;
		  for (i = 1; i < size; i++)
		    tp[i] = ~vp[i] & GMP_NUMB_MASK;
		  mpn_sub (tp + size, up, usize, vp + size, usize - ediff);
		  mpn_sub_1 (tp + size, tp + size, usize, (mp_limb_t) 1);
		  rsize = vsize + ediff;
		}
	    }
	}
      else
	{
	  /* uuuu    */
	  /*      vv */
	  /* No overlap at all: the low limbs are -V in two's complement,
	     the gap fills with GMP_NUMB_MAX (borrow propagation), and U
	     loses one unit to the borrow.  */
	  mp_size_t size, i;
	  size = vsize + ediff - usize;
	  tp[0] = -vp[0] & GMP_NUMB_MASK;
	  for (i = 1; i < vsize; i++)
	    tp[i] = ~vp[i] & GMP_NUMB_MASK;
	  for (i = vsize; i < size; i++)
	    tp[i] = GMP_NUMB_MAX;
	  mpn_sub_1 (tp + size, up, usize, (mp_limb_t) 1);
	  rsize = size + usize;
	}

    normalize:
      /* Full normalize.  Optimize later.  */
      while (rsize != 0 && tp[rsize - 1] == 0)
	{
	  rsize--;
	  exp--;
	}
      MPN_COPY (rp, tp, rsize);
    }

 done:
  r->_mp_size = negate ? -rsize : rsize;
  if (rsize == 0)
    exp = 0;			/* canonical zero is stored with exponent 0 */
  r->_mp_exp = exp;
  TMP_FREE;
}