/* mpfr_round_raw_generic, mpfr_round_raw2, mpfr_round_raw, mpfr_prec_round,
   mpfr_can_round, mpfr_can_round_raw -- various rounding functions

Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
Contributed by the AriC and Caramel projects, INRIA.

This file is part of the GNU MPFR Library.

The GNU MPFR Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MPFR Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MPFR Library; see the file COPYING.LESSER.  If not, see
http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */

#include "mpfr-impl.h"

#define mpfr_round_raw_generic mpfr_round_raw
#define flag 0
#define use_inexp 1
#include "round_raw_generic.c"

#define mpfr_round_raw_generic mpfr_round_raw_2
#define flag 1
#define use_inexp 0
#include "round_raw_generic.c"

/* Seems to be unused. Uncomment to enable it.
#define mpfr_round_raw_generic mpfr_round_raw_3
#define flag 1
#define use_inexp 1
#include "round_raw_generic.c"
*/

#define mpfr_round_raw_generic mpfr_round_raw_4
#define flag 0
#define use_inexp 0
#include "round_raw_generic.c"
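
/* The inclusions above instantiate several flavors of the raw rounding
   routine; the precise semantics of flag and use_inexp are documented in
   round_raw_generic.c.  As used in this file, mpfr_round_raw (flag 0,
   use_inexp 1) stores the rounded significand and reports the ternary
   value, while mpfr_round_raw_2 (flag 1, use_inexp 0), reached below under
   the mpfr_round_raw2 name, only tells whether 1 should be added at
   ulp(b,prec).  mpfr_round_raw_4 (flag 0, use_inexp 0) is presumably meant
   for callers that need the rounded significand but not the ternary value. */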

int
mpfr_prec_round (mpfr_ptr x, mpfr_prec_t prec, mpfr_rnd_t rnd_mode)
{
  mp_limb_t *tmp, *xp;
  int carry, inexact;
  mpfr_prec_t nw, ow;
  MPFR_TMP_DECL(marker);

  MPFR_ASSERTN(prec >= MPFR_PREC_MIN && prec <= MPFR_PREC_MAX);

  nw = MPFR_PREC2LIMBS (prec); /* needed allocated limbs */
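  /* for example, with GMP_NUMB_BITS = 64, prec = 100 gives nw = 2 limbs */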

  /* check if x has enough allocated space for the significand */
  /* Get the number of limbs from the precision.
     (Compatible with all allocation methods) */
  ow = MPFR_LIMB_SIZE (x);
  if (nw > ow)
    {
      /* FIXME: a variable created with custom allocation, MPFR_DECL_INIT
         or GROUP_ALLOC must not be reallocated here: how to detect it? */
      ow = MPFR_GET_ALLOC_SIZE(x);
      if (nw > ow)
       {
         /* Realloc significand */
         mpfr_limb_ptr tmpx = (mpfr_limb_ptr) (*__gmp_reallocate_func)
           (MPFR_GET_REAL_PTR(x), MPFR_MALLOC_SIZE(ow), MPFR_MALLOC_SIZE(nw));
         MPFR_SET_MANT_PTR(x, tmpx); /* mant ptr must be set
                                        before alloc size */
         MPFR_SET_ALLOC_SIZE(x, nw); /* new number of allocated limbs */
       }
    }

  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ))
    {
      MPFR_PREC(x) = prec; /* Special value: need to set prec */
      if (MPFR_IS_NAN(x))
        MPFR_RET_NAN;
      MPFR_ASSERTD(MPFR_IS_INF(x) || MPFR_IS_ZERO(x));
      return 0; /* infinity and zero are exact */
    }

  /* x is a non-zero real number */

  MPFR_TMP_MARK(marker);
  tmp = MPFR_TMP_LIMBS_ALLOC (nw);
  xp = MPFR_MANT(x);
  carry = mpfr_round_raw (tmp, xp, MPFR_PREC(x), MPFR_IS_NEG(x),
                          prec, rnd_mode, &inexact);
  MPFR_PREC(x) = prec;

  if (MPFR_UNLIKELY(carry))
    {
      mpfr_exp_t exp = MPFR_EXP (x);

      if (MPFR_UNLIKELY(exp == __gmpfr_emax))
        (void) mpfr_overflow(x, rnd_mode, MPFR_SIGN(x));
      else
        {
          MPFR_ASSERTD (exp < __gmpfr_emax);
          MPFR_SET_EXP (x, exp + 1);
          xp[nw - 1] = MPFR_LIMB_HIGHBIT;
          if (nw - 1 > 0)
            MPN_ZERO(xp, nw - 1);
        }
    }
  else
    MPN_COPY(xp, tmp, nw);

  MPFR_TMP_FREE(marker);
  return inexact;
}
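
/* Illustrative use of mpfr_prec_round above (a sketch, not part of the
   library): shrink a 100-bit approximation of 1/3 to 53 bits and get the
   ternary value.

     mpfr_t x;
     int inex;
     mpfr_init2 (x, 100);
     mpfr_set_ui (x, 1, MPFR_RNDN);
     mpfr_div_ui (x, x, 3, MPFR_RNDN);          // x ~ 1/3 with 100 bits
     inex = mpfr_prec_round (x, 53, MPFR_RNDN); // MPFR_PREC(x) is now 53
     // inex is negative, zero or positive according to whether the new
     // value of x is below, equal to or above the previous one
     mpfr_clear (x);
*/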

/* assumption: GMP_NUMB_BITS is a power of 2 */

/* assuming b is an approximation to x in direction rnd1 with error at
   most 2^(MPFR_EXP(b)-err), returns 1 if one is able to round exactly
   x to precision prec with direction rnd2, and 0 otherwise.

   Side effects: none.
*/

int
mpfr_can_round (mpfr_srcptr b, mpfr_exp_t err, mpfr_rnd_t rnd1,
                mpfr_rnd_t rnd2, mpfr_prec_t prec)
{
  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(b)))
    return 0; /* we cannot round if b is zero, NaN or infinity */
  else
    return mpfr_can_round_raw (MPFR_MANT(b), MPFR_LIMB_SIZE(b),
                               MPFR_SIGN(b), err, rnd1, rnd2, prec);
}
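
/* Typical use of mpfr_can_round in a Ziv-style loop (an illustrative sketch
   only; approx_foo is a hypothetical routine computing f(x) on p bits with
   an absolute error of at most 2^(EXP(t) - (p - 3)), i.e. err = p - 3 in the
   sense of the error bound above):

     mpfr_prec_t py = mpfr_get_prec (y);
     mpfr_prec_t p = py + 20;
     mpfr_t t;
     mpfr_init2 (t, p);
     for (;;)
       {
         approx_foo (t, x, p);
         if (mpfr_can_round (t, p - 3, MPFR_RNDN, MPFR_RNDN, py))
           break;
         p += 20;                   // not enough: raise the working precision
         mpfr_set_prec (t, p);
       }
     mpfr_set (y, t, MPFR_RNDN);    // correctly rounded to py bits
     mpfr_clear (t);
*/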

int
mpfr_can_round_raw (const mp_limb_t *bp, mp_size_t bn, int neg, mpfr_exp_t err0,
                    mpfr_rnd_t rnd1, mpfr_rnd_t rnd2, mpfr_prec_t prec)
{
  mpfr_prec_t err;
  mp_size_t k, k1, tn;
  int s, s1;
  mp_limb_t cc, cc2;
  mp_limb_t *tmp;
  MPFR_TMP_DECL(marker);

  if (MPFR_UNLIKELY(err0 < 0 || (mpfr_uexp_t) err0 <= prec))
    return 0;  /* can't round */
  else if (MPFR_UNLIKELY (prec > (mpfr_prec_t) bn * GMP_NUMB_BITS))
    { /* then ulp(b) < precision < error */
      return rnd2 == MPFR_RNDN && (mpfr_uexp_t) err0 - 2 >= prec;
      /* we can round only with rounding to nearest and err0 >= prec + 2 */
    }
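  /* Rationale for the test above: since bn * GMP_NUMB_BITS < prec, b is
     exactly representable with prec bits; if moreover err0 >= prec + 2, the
     exact value lies within 1/4 ulp(b,prec) of b and thus rounds to b itself
     under rounding to nearest. */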

  MPFR_ASSERT_SIGN(neg);
  neg = MPFR_IS_NEG_SIGN(neg);

  /* even if the error is smaller than ulp(b), it can still affect the last
     bit of b, so clamp err at bn * GMP_NUMB_BITS */
  err = ((mpfr_uexp_t) err0 > (mpfr_prec_t) bn * GMP_NUMB_BITS) ?
    (mpfr_prec_t) bn * GMP_NUMB_BITS : (mpfr_prec_t) err0;

  /* warning: if err = m*GMP_NUMB_BITS, consider limb m-1 and not m */
  k = (err - 1) / GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(s, err);
  /* the error corresponds to bit s in limb k, the most significant limb
     being limb 0 */

  k1 = (prec - 1) / GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(s1, prec);
  /* the last significant bit is bit s1 in limb k1 */

  /* don't need to consider the k1 most significant limbs */
  k -= k1;
  bn -= k1;
  prec -= (mpfr_prec_t) k1 * GMP_NUMB_BITS;

  /* if adding or subtracting (1 << s) in bp[bn-1-k] does not change
     bp[bn-1] >> s1, then we can round */
  MPFR_TMP_MARK(marker);
  tn = bn;
  k++; /* since we work with k+1 everywhere */
  tmp = MPFR_TMP_LIMBS_ALLOC (tn);
  if (bn > k)
    MPN_COPY (tmp, bp, bn - k);

  MPFR_ASSERTD (k > 0);

  /* Transform RNDD and RNDU to Zero / Away */
  MPFR_ASSERTD((neg == 0) || (neg == 1));
  if (MPFR_IS_RNDUTEST_OR_RNDDNOTTEST(rnd1, neg))
    rnd1 = MPFR_RNDZ;

  switch (rnd1)
    {
    case MPFR_RNDZ:
      /* Round to Zero */
      cc = (bp[bn - 1] >> s1) & 1;
      /* mpfr_round_raw2 returns 1 if one should add 1 at ulp(b,prec),
         and 0 otherwise */
      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec);
      /* cc is the new value of bit s1 in bp[bn-1] */
      /* now round b + 2^(MPFR_EXP(b)-err) */
      cc2 = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      break;
    case MPFR_RNDN:
      /* Round to nearest */
      /* first round b+2^(MPFR_EXP(b)-err) */
      cc = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      cc = (tmp[bn - 1] >> s1) & 1; /* gives 0 when the carry cc above is 1,
                                       since the top limbs of tmp are then 0 */
      cc ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);
      /* now round b-2^(MPFR_EXP(b)-err) */
      cc2 = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      break;
    default:
      /* Round away */
      cc = (bp[bn - 1] >> s1) & 1;
      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec);
      /* now round b +/- 2^(MPFR_EXP(b)-err) */
      cc2 = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
      break;
    }

  /* if cc2 is 1, then a carry or borrow propagates to the next limb */
  if (cc2 && cc)
    {
      MPFR_TMP_FREE(marker);
      return 0;
    }

  cc2 = (tmp[bn - 1] >> s1) & 1;
  cc2 ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);
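  /* cc is bit s1 of one endpoint of the interval known to contain the exact
     value, rounded to prec bits in direction rnd2; cc2 is the same bit for
     the other endpoint.  If they agree, both endpoints round to the same
     significand, so the correctly rounded result is determined. */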

  MPFR_TMP_FREE(marker);
  return cc == cc2;
}