/* mpn_addmul_1 -- multiply the N-limb vector pointed to by UP by VL,
   add the N least significant limbs of the product to the limb vector
   pointed to by RP, and return the most significant limb of the product,
   adjusted for carry-out from the addition.

Copyright 1992, 1993, 1994, 1996, 2000, 2002, 2004 Free Software Foundation,
Inc.

This file is part of the GNU MP Library.

The GNU MP Library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The GNU MP Library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
License for more details.

You should have received a copy of the GNU Lesser General Public License
along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */

#include "gmp.h"
#include "gmp-impl.h"
#include "longlong.h"


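/* Three implementations follow, selected at compile time on GMP_NAIL_BITS.
   In a nails build, each limb keeps its high GMP_NAIL_BITS bits zero and
   carries its value in the low GMP_NUMB_BITS, which changes how the
   partial products and carries have to be aligned and propagated.  */
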
#if GMP_NAIL_BITS == 0

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t ul, cl, hpl, lpl, rl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));

  cl = 0;
  do
    {
      ul = *up++;
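      /* hpl:lpl is the full two-limb product ul * vl.  */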
      umul_ppmm (hpl, lpl, ul, vl);

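      /* Fold in the carry from the previous iteration.  An unsigned wrap
         (lpl < cl) signals a carry-out; adding it to hpl cannot overflow,
         since hpl <= B-2 for any product of two limbs (B = 2^GMP_LIMB_BITS).  */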
      lpl += cl;
      cl = (lpl < cl) + hpl;

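      /* Accumulate the low product limb into *rp, again detecting the
         carry by the wrap.  Since ul*vl + cl + rl <= B*B - 1, the
         combined carry still fits in a single limb.  */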
      rl = *rp;
      lpl = rl + lpl;
      cl += lpl < rl;
      *rp++ = lpl;
    }
  while (--n != 0);

  return cl;
}

#endif
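
/* Illustration only (not part of this file's interface): mpn_addmul_1 is
   the inner step of schoolbook multiplication.  A sketch, with
   illustrative variable names, assuming the product area {prodp, un+vn}
   has been zero-initialized and un, vn >= 1:

     mp_size_t i;
     for (i = 0; i < vn; i++)
       prodp[un + i] = mpn_addmul_1 (prodp + i, up, un, vp[i]);

   Each call adds {up, un} * vp[i] at limb offset i; the carry limb it
   returns is stored just past the region it updated, where the next
   call picks it up.  */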

#if GMP_NAIL_BITS == 1

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, cl, xl, c1, c2, c3;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

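  /* Pre-shift vl so that umul_ppmm splits the product exactly at the
     numb boundary: hpl receives (ul*vl) >> GMP_NUMB_BITS, and the low
     half can be recovered by shifting lpl back down.  */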
  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
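      /* hpl:lpl = (ul * vl) << GMP_NAIL_BITS; after the shift-down,
         lpl holds the low GMP_NUMB_BITS of the product and hpl the
         high numb-sized half, both with clean nails.  */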
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
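      /* Sum prev_hpl + lpl + rl + cl within the numb field; ADDC_LIMB
         yields each numb-level carry separately, so cl <= 3.  The high
         product half is deferred one iteration (prev_hpl) because it
         belongs at the next limb position.  */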
      ADDC_LIMB (c1, xl, prev_hpl, lpl);
      ADDC_LIMB (c2, xl, xl, rl);
      ADDC_LIMB (c3, xl, xl, cl);
      cl = c1 + c2 + c3;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif

#if GMP_NAIL_BITS >= 2

mp_limb_t
mpn_addmul_1 (mp_ptr rp, mp_srcptr up, mp_size_t n, mp_limb_t vl)
{
  mp_limb_t shifted_vl, ul, rl, lpl, hpl, prev_hpl, xw, cl, xl;

  ASSERT (n >= 1);
  ASSERT (MPN_SAME_OR_SEPARATE_P (rp, up, n));
  ASSERT_MPN (rp, n);
  ASSERT_MPN (up, n);
  ASSERT_LIMB (vl);

  shifted_vl = vl << GMP_NAIL_BITS;
  cl = 0;
  prev_hpl = 0;
  do
    {
      ul = *up++;
      rl = *rp;
      umul_ppmm (hpl, lpl, ul, shifted_vl);
      lpl >>= GMP_NAIL_BITS;
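      /* With at least 2 nail bits, prev_hpl + lpl + rl + cl stays below
         2^(GMP_NUMB_BITS+2) <= 2^GMP_LIMB_BITS, so a plain limb addition
         cannot wrap; the carry is simply the bits above the numb field.  */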
      xw = prev_hpl + lpl + rl + cl;
      cl = xw >> GMP_NUMB_BITS;
      xl = xw & GMP_NUMB_MASK;
      *rp++ = xl;
      prev_hpl = hpl;
    }
  while (--n != 0);

  return prev_hpl + cl;
}

#endif