/* mpf_mul_2exp -- Multiply a float by 2^n.

Copyright 1993, 1994, 1996, 2000-2002, 2004 Free Software Foundation, Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of either:

  * the GNU Lesser General Public License as published by the Free
    Software Foundation; either version 3 of the License, or (at your
    option) any later version.

or

  * the GNU General Public License as published by the Free Software
    Foundation; either version 2 of the License, or (at your option) any
    later version.

or both in parallel, as here.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received copies of the GNU General Public License and the
GNU Lesser General Public License along with the GNU MP Library.  If not,
see https://www.gnu.org/licenses/.  */
#include "gmp.h"
#include "gmp-impl.h"

/* Multiples of GMP_NUMB_BITS in exp simply mean an amount added to EXP(u)
   to set EXP(r).  The remainder exp%GMP_NUMB_BITS is then a left shift for
   the limb data.

   If exp%GMP_NUMB_BITS == 0 then there's no shifting, we effectively just
   do an mpz_set with changed EXP(r).  Like mpz_set we take prec+1 limbs in
   this case.  Although just prec would suffice, it's nice to have
   mpf_mul_2exp with exp==0 come out the same as mpz_set.

   When shifting we take up to prec many limbs from the input.  Our shift is
   cy = mpn_lshift (PTR(r), PTR(u)+k, size, ...), where k is the number of
   low limbs dropped from u, and the carry out is stored to PTR(r)[size].

   It may be noted that the low limb PTR(r)[0] doesn't incorporate bits from
   PTR(u)[k-1] (when k>=1 makes that limb available).  Taking just prec
   limbs from the input (with the high non-zero) is enough bits for the
   application requested precision, there's no need for extra work.

   If r==u the shift will have overlapping operands.  When k==0 (ie. when
   usize <= prec), the overlap is supported by lshift (ie. dst == src).

   But when r==u and k>=1 (ie. usize > prec), we would have an invalid
   overlap (ie. mpn_lshift (rp, rp+k, ...)).  In this case we must instead
   use mpn_rshift (PTR(r)+1, PTR(u)+k, size, NUMB-shift) with the carry out
   stored to PTR(r)[0].  An rshift by NUMB-shift bits like this gives
   identical data, it's just its overlap restrictions which differ.  (A
   worked example follows this comment.)

   Enhancements:

   The way mpn_lshift is used means successive mpf_mul_2exp calls on the
   same operand will accumulate low zero limbs, until prec+1 limbs is
   reached.  This is wasteful for subsequent operations.  When abs_usize <=
   prec, we should test the low exp%GMP_NUMB_BITS many bits of PTR(u)[0],
   ie. those which would be shifted out by an mpn_rshift.  If they're zero
   then use that mpn_rshift.  */
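
/* Worked example (added for exposition; not part of the original source).
   With GMP_NUMB_BITS == 64 and exp == 2*64 + 5 == 133, EXP(r) becomes
   EXP(u) + 2, plus 1 more when the shift carries into a new high limb
   (adj in the code below), and the limb data moves up by 5 bits:

       cy = mpn_lshift (PTR(r), PTR(u)+k, size, 5);     PTR(r)[size] = cy;

   The same size+1 result limbs are produced by shifting right by
   64-5 == 59 bits into PTR(r)+1, with the carry stored at PTR(r)[0]:

       cy = mpn_rshift (PTR(r)+1, PTR(u)+k, size, 59);  PTR(r)[0] = cy;

   In both variants each source limb contributes its low 59 bits (moved up
   by 5) to one result limb and its high 5 bits to the limb above; only the
   traversal direction, and hence the permitted operand overlap, differs.  */
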
void
mpf_mul_2exp (mpf_ptr r, mpf_srcptr u, mp_bitcnt_t exp)
{
  mp_srcptr up;
  mp_ptr rp = r->_mp_d;
  mp_size_t usize;
  mp_size_t abs_usize;
  mp_size_t prec = r->_mp_prec;
  mp_exp_t uexp = u->_mp_exp;

  usize = u->_mp_size;

  if (UNLIKELY (usize == 0))
    {
      r->_mp_size = 0;
      r->_mp_exp = 0;
      return;
    }

  abs_usize = ABS (usize);
  up = u->_mp_d;

  if (exp % GMP_NUMB_BITS == 0)
    {
      prec++;			/* retain one more limb of precision, as
				   there is no shift carry-out to account
				   for in this case */
      if (abs_usize > prec)
	{
	  up += abs_usize - prec;
	  abs_usize = prec;
	}
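      /* When r == u the operands overlap with rp <= up, so the
	 incrementing (low-to-high) copy below is the safe direction.  */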
      if (rp != up)
	MPN_COPY_INCR (rp, up, abs_usize);
      r->_mp_exp = uexp + exp / GMP_NUMB_BITS;
    }
  else
    {
      mp_limb_t cy_limb;
      mp_size_t adj;
      if (abs_usize > prec)
	{
	  up += abs_usize - prec;
	  abs_usize = prec;
	  /* Use mpn_rshift since mpn_lshift operates downwards, and we
	     therefore would clobber part of U before using that part, in case
	     R is the same variable as U.  */
	  cy_limb = mpn_rshift (rp + 1, up, abs_usize,
				GMP_NUMB_BITS - exp % GMP_NUMB_BITS);
	  rp[0] = cy_limb;
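	  /* The result gains a high limb only when the top limb written
	     by the shift is non-zero; adj feeds the common size and EXP
	     adjustment below.  */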
	  adj = rp[abs_usize] != 0;
	}
      else
	{
	  cy_limb = mpn_lshift (rp, up, abs_usize, exp % GMP_NUMB_BITS);
	  rp[abs_usize] = cy_limb;
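	  /* A non-zero carry out of the left shift becomes a new high
	     limb.  */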
	  adj = cy_limb != 0;
	}

      abs_usize += adj;
      r->_mp_exp = uexp + exp / GMP_NUMB_BITS + adj;
    }
  r->_mp_size = usize >= 0 ? abs_usize : -abs_usize;
}
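
/* Usage sketch (added for exposition; not part of the GMP source).  A
   minimal caller, assuming a normal GMP installation (link with -lgmp):

   #include <gmp.h>

   int
   main (void)
   {
     mpf_t x;
     mpf_init2 (x, 64);          // request 64 bits of precision
     mpf_set_ui (x, 3);
     mpf_mul_2exp (x, x, 10);    // x = 3 * 2^10 = 3072
     gmp_printf ("%Ff\n", x);    // prints 3072.000000
     mpf_mul_2exp (x, x, 128);   // on a 64-bit limb build this is two
                                 // whole limbs: only EXP(x) changes
     mpf_clear (x);
     return 0;
   }
*/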