Lines Matching defs:n*

25 along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.  */
33 Asymptotically fast algorithms for the numerical multiplication and division
45 It might be possible to avoid a small number of MPN_COPYs by using a
74 /* Find the best k to use for a mod 2^(m*GMP_NUMB_BITS)+1 FFT for m >= n.
77 some gmp-mparam.h is not updated. */
105 mpn_fft_best_k (mp_size_t n, int sqr)
115 tab_n = tab->n;
117 if (n <= thres)
137 mpn_fft_best_k (mp_size_t n, int sqr)
142 if (n < mpn_fft_table[sqr][i])
146 if (i == 0 || n < 4 * mpn_fft_table[sqr][i - 1])
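A minimal sketch of this table-driven selection, with hypothetical thresholds and a hypothetical name (the real values come from the per-CPU gmp-mparam.h tuning tables); this and the sketches below assume only #include <gmp.h> unless noted:

static int
fft_best_k_sketch (mp_size_t n)
{
  /* hypothetical (threshold, k) pairs, ascending; not GMP's tuned data */
  static const struct { mp_size_t thres; int k; } tab[] =
    { {400, 4}, {1600, 5}, {6400, 6}, {25000, 7} };
  int i, k = 4;
  for (i = 0; i < (int) (sizeof tab / sizeof tab[0]); i++)
    if (n >= tab[i].thres)
      k = tab[i].k;             /* largest k whose threshold is reached */
  return k;
}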
156 /* Returns smallest possible number of limbs >= pl for an FFT of size 2^k,
159 Don't declare static: needed by tuneup.
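The rounding contract stated above, as a sketch (hypothetical name; assumes pl >= 1):

static mp_size_t
fft_next_size_sketch (mp_size_t pl, int k)
{
  pl = 1 + ((pl - 1) >> k);     /* ceil(pl / 2^k) */
  return pl << k;               /* smallest multiple of 2^k that is >= pl */
}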
190 /* r <- a*2^d mod 2^(n*GMP_NUMB_BITS)+1 with a = {a, n+1}
191 Assumes a is semi-normalized, i.e. a[n] <= 1.
192 r and a must have n+1 limbs, and not overlap.
195 mpn_fft_mul_2exp_modF (mp_ptr r, mp_srcptr a, mp_bitcnt_t d, mp_size_t n)
203 if (d >= n) /* negate */
205 /* r[0..d-1] <-- lshift(a[n-d]..a[n-1], sh)
206 r[d..n-1] <-- -lshift(a[0]..a[n-d-1], sh) */
208 d -= n;
211 /* no out shift below since a[n] <= 1 */
212 mpn_lshift (r, a + n - d, d + 1, sh);
214 cc = mpn_lshiftc (r + d, a, n - d, sh);
218 MPN_COPY (r, a + n - d, d);
219 rd = a[n];
220 mpn_com (r + d, a, n - d);
226 /* now add 1 in r[d], subtract 1 in r[n], i.e. add 1 in r[0] */
228 r[n] = 0;
229 /* cc < 2^sh <= 2^(GMP_NUMB_BITS-1) thus no overflow here */
241 /* r[0..d-1] <-- -lshift(a[n-d]..a[n-1], sh)
242 r[d..n-1] <-- lshift(a[0]..a[n-d-1], sh) */
245 /* no out bits below since a[n] <= 1 */
246 mpn_lshiftc (r, a + n - d, d + 1, sh);
248 /* {r, d+1} = {a+n-d, d+1} << sh */
249 cc = mpn_lshift (r + d, a, n - d, sh); /* {r+d, n-d} = {a, n-d}<<sh */
253 /* r[d] is not used below, but we save a test for d=0 */
254 mpn_com (r, a + n - d, d + 1);
255 rd = a[n];
256 MPN_COPY (r + d, a, n - d);
260 /* now complement {r, d}, subtract cc from r[0], subtract rd from r[d] */
262 /* if d=0 we just have r[0]=a[n] << sh */
265 /* now add 1 in r[0], subtract 1 in r[d] */
267 cc = mpn_add_1 (r, r, n, CNST_LIMB(1));
272 /* now subtract cc and rd from r[d..n] */
274 r[n] = -mpn_sub_1 (r + d, r + d, n - d, cc);
275 r[n] -= mpn_sub_1 (r + d, r + d, n - d, rd);
276 if (r[n] & GMP_LIMB_HIGHBIT)
277 r[n] = mpn_add_1 (r, r, n, CNST_LIMB(1));
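An mpz-level reference for what this routine computes, r = a*2^d mod 2^N+1; the limb code above achieves it with shifts, complements and single-limb fixups only, never a division (hypothetical name):

static void
mul_2exp_modF_ref (mpz_t r, const mpz_t a, unsigned long d, unsigned long N)
{
  mpz_t F;
  mpz_init_set_ui (F, 1);
  mpz_mul_2exp (F, F, N);
  mpz_add_ui (F, F, 1);         /* F = 2^N + 1 */
  mpz_mul_2exp (r, a, d);       /* r = a * 2^d */
  mpz_mod (r, r, F);            /* cheap in the limb code since 2^N == -1 */
  mpz_clear (F);
}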
282 /* r <- a+b mod 2^(n*GMP_NUMB_BITS)+1.
283 Assumes a and b are semi-normalized.
286 mpn_fft_add_modF (mp_ptr r, mp_srcptr a, mp_srcptr b, mp_size_t n)
290 c = a[n] + b[n] + mpn_add_n (r, a, b, n);
297 r[n] = c - x;
298 MPN_DECR_U (r, n + 1, x);
303 r[n] = 1; /* r[n] - c = 1 */
304 MPN_DECR_U (r, n + 1, c - 1);
308 r[n] = c;
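The same reduction written with branches and only public mpn calls (the code above uses the internal MPN_DECR_U); a sketch under the same semi-normalization assumption:

static void
fft_add_modF_sketch (mp_limb_t *r, const mp_limb_t *a, const mp_limb_t *b,
                     mp_size_t n)
{
  mp_limb_t c = a[n] + b[n] + mpn_add_n (r, a, b, n);  /* 0 <= c <= 3 */
  if (c > 1)
    {
      r[n] = 1;                        /* replaces c*2^N by 2^N ... */
      mpn_sub_1 (r, r, n + 1, c - 1);  /* ... then subtracting c-1 removes
                                          (c-1)*(2^N+1) in total */
    }
  else
    r[n] = c;                          /* already semi-normalized */
}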
313 /* r <- a-b mod 2^(n*GMP_NUMB_BITS)+1.
314 Assumes a and b are semi-normalized.
317 mpn_fft_sub_modF (mp_ptr r, mp_srcptr a, mp_srcptr b, mp_size_t n)
321 c = a[n] - b[n] - mpn_sub_n (r, a, b, n);
328 r[n] = x + c;
329 MPN_INCR_U (r, n + 1, x);
334 r[n] = 0;
335 MPN_INCR_U (r, n + 1, -c);
339 r[n] = c;
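And its mirror image for subtraction, again a sketch using public mpn calls only:

static void
fft_sub_modF_sketch (mp_limb_t *r, const mp_limb_t *a, const mp_limb_t *b,
                     mp_size_t n)
{
  mp_limb_t c = a[n] - b[n] - mpn_sub_n (r, a, b, n);
  if (c <= 1)
    r[n] = c;                 /* difference was non-negative */
  else                        /* c wrapped around: it represents -1 or -2 */
    {
      r[n] = 0;
      mpn_add_1 (r, r, n + 1, -c);     /* add (-c)*(2^N+1) back */
    }
}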
345 N=n*GMP_NUMB_BITS, and 2^omega is a primitive root mod 2^N+1
350 mp_size_t omega, mp_size_t n, mp_size_t inc, mp_ptr tp)
356 cy = mpn_add_n_sub_n (Ap[0], Ap[inc], Ap[0], Ap[inc], n + 1) & 1;
358 MPN_COPY (tp, Ap[0], n + 1);
359 mpn_add_n (Ap[0], Ap[0], Ap[inc], n + 1);
360 cy = mpn_sub_n (Ap[inc], tp, Ap[inc], n + 1);
362 if (Ap[0][n] > 1) /* can be 2 or 3 */
363 Ap[0][n] = 1 - mpn_sub_1 (Ap[0], Ap[0], n, Ap[0][n] - 1);
364 if (cy) /* Ap[inc][n] can be -1 or -2 */
365 Ap[inc][n] = mpn_add_1 (Ap[inc], Ap[inc], n, ~Ap[inc][n] + 1);
372 mpn_fft_fft (Ap, K >> 1, ll-1, 2 * omega, n, inc * 2, tp);
373 mpn_fft_fft (Ap+inc, K >> 1, ll-1, 2 * omega, n, inc * 2, tp);
380 mpn_fft_mul_2exp_modF (tp, Ap[inc], lk[0] * omega, n);
381 mpn_fft_sub_modF (Ap[inc], Ap[0], tp, n);
382 mpn_fft_add_modF (Ap[0], Ap[0], tp, n);
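The three lines above are the twiddled radix-2 butterfly; at the mpz level, with w = 2^e and F = 2^N+1 (hypothetical name, illustration only):

static void
fft_butterfly_ref (mpz_t a0, mpz_t a1, unsigned long e, const mpz_t F)
{
  mpz_t tp;
  mpz_init (tp);
  mpz_mul_2exp (tp, a1, e);
  mpz_mod (tp, tp, F);        /* tp = a1 * 2^e mod F */
  mpz_sub (a1, a0, tp);
  mpz_mod (a1, a1, F);        /* a1 = a0 - tp mod F */
  mpz_add (a0, a0, tp);
  mpz_mod (a0, a0, F);        /* a0 = a0 + tp mod F */
  mpz_clear (tp);
}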
388 N=n*GMP_NUMB_BITS, and 2^omega is a primitive root mod 2^N+1
390 tp must have space for 2*(n+1) limbs.
394 /* Given ap[0..n] with ap[n]<=1, reduce it modulo 2^(n*GMP_NUMB_BITS)+1,
395 by subtracting that modulus if necessary.
397 If ap[0..n] is exactly 2^(n*GMP_NUMB_BITS) then mpn_sub_1 produces a
402 mpn_fft_normalize (mp_ptr ap, mp_size_t n)
404 if (ap[n] != 0)
406 MPN_DECR_U (ap, n + 1, CNST_LIMB(1));
407 if (ap[n] == 0)
411 MPN_ZERO (ap, n);
412 ap[n] = 1;
415 ap[n] = 0;
419 /* a[i] <- a[i]*b[i] mod 2^(n*GMP_NUMB_BITS)+1 for 0 <= i < K */
421 mpn_fft_mul_modF_K (mp_ptr *ap, mp_ptr *bp, mp_size_t n, int K)
429 if (n >= (sqr ? SQR_FFT_MODF_THRESHOLD : MUL_FFT_MODF_THRESHOLD))
431 int k, K2, nprime2, Nprime2, M2, maxLK, l, Mp2;
435 k = mpn_fft_best_k (n, sqr);
437 ASSERT_ALWAYS((n & (K2 - 1)) == 0);
439 M2 = n * GMP_NUMB_BITS >> k;
440 l = n >> k;
443 nprime2 = Nprime2 / GMP_NUMB_BITS;
445 /* we should ensure that nprime2 is a multiple of the next K */
446 if (nprime2 >= (sqr ? SQR_FFT_MODF_THRESHOLD : MUL_FFT_MODF_THRESHOLD))
451 K3 = 1L << mpn_fft_best_k (nprime2, sqr);
452 if ((nprime2 & (K3 - 1)) == 0)
454 nprime2 = (nprime2 + K3 - 1) & -K3;
455 Nprime2 = nprime2 * GMP_LIMB_BITS;
456 /* warning: since nprime2 changed, K3 may change too! */
459 ASSERT_ALWAYS(nprime2 < n); /* otherwise we'll loop */
465 A = TMP_ALLOC_LIMBS (2 * (nprime2 + 1) << k);
466 T = TMP_ALLOC_LIMBS (2 * (nprime2 + 1));
467 B = A + ((nprime2 + 1) << k);
473 TRACE (printf ("recurse: %ldx%ld limbs -> %d times %dx%d (%1.2f)\n", n,
474 n, K2, nprime2, nprime2, 2.0*(double)n/nprime2/K2));
478 mpn_fft_normalize (*ap, n);
480 mpn_fft_normalize (*bp, n);
482 mpn_mul_fft_decompose (A, Ap, K2, nprime2, *ap, (l << k) + 1, l, Mp2, T);
484 mpn_mul_fft_decompose (B, Bp, K2, nprime2, *bp, (l << k) + 1, l, Mp2, T);
486 cy = mpn_mul_fft_internal (*ap, n, k, Ap, Bp, A, B, nprime2,
488 (*ap)[n] = cy;
495 mp_size_t n2 = 2 * n;
496 tp = TMP_ALLOC_LIMBS (n2);
497 tpn = tp + n;
498 TRACE (printf (" mpn_mul_n %d of %ld limbs\n", K, n));
504 mpn_sqr (tp, a, n);
506 mpn_mul_n (tp, b, a, n);
507 if (a[n] != 0)
508 cc = mpn_add_n (tpn, tpn, b, n);
511 if (b[n] != 0)
512 cc += mpn_add_n (tpn, tpn, a, n) + a[n];
515 /* FIXME: use MPN_INCR_U here, since carry is not expected. */
516 cc = mpn_add_1 (tp, tp, n2, cc);
519 a[n] = mpn_sub_n (a, tp, tpn, n) && mpn_add_1 (a, a, n, CNST_LIMB(1));
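That last line is the whole point of working mod 2^N+1: the 2n-limb product wraps around as lo minus hi. Spelled out as a sketch (rp needs n+1 limbs):

static void
fft_wrap_sketch (mp_limb_t *rp, const mp_limb_t *tp, mp_size_t n)
{
  /* 2^(n*GMP_NUMB_BITS) == -1, so {tp,2n} reduces to lo - hi */
  mp_limb_t borrow = mpn_sub_n (rp, tp, tp + n, n);
  /* if negative, add the modulus 2^N+1 back; a carry out of the
     increment means the result is exactly 2^N, stored as rp[n] = 1 */
  rp[n] = borrow ? mpn_add_1 (rp, rp, n, 1) : 0;
}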
528 Assumes the Ap[] are pseudo-normalized, i.e. 0 <= Ap[][n] <= 1.
532 mpn_fft_fftinv (mp_ptr *Ap, int K, mp_size_t omega, mp_size_t n, mp_ptr tp)
538 cy = mpn_add_n_sub_n (Ap[0], Ap[1], Ap[0], Ap[1], n + 1) & 1;
540 MPN_COPY (tp, Ap[0], n + 1);
541 mpn_add_n (Ap[0], Ap[0], Ap[1], n + 1);
542 cy = mpn_sub_n (Ap[1], tp, Ap[1], n + 1);
544 if (Ap[0][n] > 1) /* can be 2 or 3 */
545 Ap[0][n] = 1 - mpn_sub_1 (Ap[0], Ap[0], n, Ap[0][n] - 1);
546 if (cy) /* Ap[1][n] can be -1 or -2 */
547 Ap[1][n] = mpn_add_1 (Ap[1], Ap[1], n, ~Ap[1][n] + 1);
553 mpn_fft_fftinv (Ap, K2, 2 * omega, n, tp);
554 mpn_fft_fftinv (Ap + K2, K2, 2 * omega, n, tp);
561 mpn_fft_mul_2exp_modF (tp, Ap[K2], j * omega, n);
562 mpn_fft_sub_modF (Ap[K2], Ap[0], tp, n);
563 mpn_fft_add_modF (Ap[0], Ap[0], tp, n);
569 /* R <- A/2^k mod 2^(n*GMP_NUMB_BITS)+1 */
571 mpn_fft_div_2exp_modF (mp_ptr r, mp_srcptr a, mp_bitcnt_t k, mp_size_t n)
576 i = 2 * n * GMP_NUMB_BITS - k;
577 mpn_fft_mul_2exp_modF (r, a, i, n);
578 /* 1/2^k = 2^(2nL-k) mod 2^(n*GMP_NUMB_BITS)+1 */
579 /* normalize so that R < 2^(n*GMP_NUMB_BITS)+1 */
580 mpn_fft_normalize (r, n);
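The identity this relies on can be checked directly with mpz (hypothetical helper; requires k <= 2N): since 2^(2N) == 1 mod 2^N+1, multiplying by 2^(2N-k) divides by 2^k.

static int
div_2exp_identity_check (unsigned long N, unsigned long k)
{
  mpz_t F, x, y;
  int ok;
  mpz_inits (F, x, y, NULL);
  mpz_setbit (F, N);
  mpz_add_ui (F, F, 1);          /* F = 2^N + 1 */
  mpz_set_ui (x, 123456789);
  mpz_mod (x, x, F);
  mpz_mul_2exp (y, x, 2 * N - k);
  mpz_mod (y, y, F);             /* y = x / 2^k mod F */
  mpz_mul_2exp (y, y, k);
  mpz_mod (y, y, F);             /* multiplying back by 2^k ... */
  ok = (mpz_cmp (y, x) == 0);    /* ... must return x */
  mpz_clears (F, x, y, NULL);
  return ok;
}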
584 /* {rp,n} <- {ap,an} mod 2^(n*GMP_NUMB_BITS)+1, n <= an <= 3*n.
585 Returns carry out, i.e. 1 iff {ap,an} = -1 mod 2^(n*GMP_NUMB_BITS)+1,
586 then {rp,n}=0.
589 mpn_fft_norm_modF (mp_ptr rp, mp_size_t n, mp_ptr ap, mp_size_t an)
596 ASSERT ((n <= an) && (an <= 3 * n));
597 m = an - 2 * n;
600 l = n;
601 /* add {ap, m} and {ap+2n, m} in {rp, m} */
602 cc = mpn_add_n (rp, ap, ap + 2 * n, m);
603 /* copy {ap+m, n-m} to {rp+m, n-m} */
604 rpn = mpn_add_1 (rp + m, ap + m, n - m, cc);
608 l = an - n; /* l <= n */
609 MPN_COPY (rp, ap, n);
613 /* remains to subtract {ap+n, l} from {rp, n+1} */
614 cc = mpn_sub_n (rp, rp, ap + n, l);
615 rpn -= mpn_sub_1 (rp + l, rp + l, n - l, cc);
616 if (rpn < 0) /* necessarily rpn = -1 */
617 rpn = mpn_add_1 (rp, rp, n, CNST_LIMB(1));
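The folding performed here, at the mpz level: each N-bit digit enters with alternating sign because 2^N == -1 mod F (sketch; assumes x >= 0):

static void
norm_modF_ref (mpz_t r, const mpz_t x, unsigned long N)
{
  mpz_t F, t, lo;
  int sign = 1;
  mpz_inits (F, t, lo, NULL);
  mpz_setbit (F, N);
  mpz_add_ui (F, F, 1);          /* F = 2^N + 1 */
  mpz_set_ui (r, 0);
  mpz_set (t, x);
  while (mpz_sgn (t) != 0)
    {
      mpz_fdiv_r_2exp (lo, t, N);        /* next N-bit digit */
      if (sign > 0)
        mpz_add (r, r, lo);
      else
        mpz_sub (r, r, lo);
      mpz_fdiv_q_2exp (t, t, N);
      sign = -sign;                      /* 2^N == -1 alternates signs */
    }
  mpz_mod (r, r, F);
  mpz_clears (F, t, lo, NULL);
}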
621 /* store in A[0..nprime] the first M bits from {n, nl},
622 in A[nprime+1..] the following M bits, ...
624 T must have space for at least (nprime + 1) limbs.
625 We must have nl <= 2*K*l.
628 mpn_mul_fft_decompose (mp_ptr A, mp_ptr *Ap, int K, int nprime, mp_srcptr n,
629 mp_size_t nl, int l, int Mp, mp_ptr T)
637 if (nl > Kl) /* normalize {n, nl} mod 2^(Kl*GMP_NUMB_BITS)+1 */
639 mp_size_t dif = nl - Kl;
648 cy = mpn_sub_n (tmp, n, n + Kl, Kl);
649 n += 2 * Kl;
652 /* now dif > 0 */
656 cy += mpn_sub_n (tmp, tmp, n, Kl);
658 cy -= mpn_add_n (tmp, tmp, n, Kl);
660 n += Kl;
663 /* now dif <= Kl */
665 cy += mpn_sub (tmp, tmp, Kl, n, dif);
667 cy -= mpn_add (tmp, tmp, Kl, n, dif);
673 else /* dif <= Kl, i.e. nl <= 2 * Kl */
675 cy = mpn_sub (tmp, n, Kl, n + Kl, dif);
679 nl = Kl + 1;
680 n = tmp;
685 /* store the next M bits of n into A[0..nprime] */
686 if (nl > 0) /* nl is the number of remaining limbs */
688 j = (l <= nl && i < K - 1) ? l : nl; /* store the next j limbs */
689 nl -= j;
690 MPN_COPY (T, n, j);
691 MPN_ZERO (T + j, nprime + 1 - j);
692 n += l;
693 mpn_fft_mul_2exp_modF (A, T, i * Mp, nprime);
696 MPN_ZERO (A, nprime + 1);
697 A += nprime + 1;
699 ASSERT_ALWAYS (nl == 0);
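The decomposition in one picture, mpz-level: K chunks of M bits, chunk i weighted by 2^(i*Mp) mod F. Since K*Mp equals the transform length N', 2^(K*Mp) == -1 mod F, and this weighting is what makes the cyclic transform compute the negacyclic product needed here. A sketch (destroys x; A[] must hold K initialized mpz_t):

static void
decompose_ref (mpz_t *A, mpz_t x, int K, unsigned long M, unsigned long Mp,
               const mpz_t F)
{
  int i;
  for (i = 0; i < K; i++)
    {
      mpz_fdiv_r_2exp (A[i], x, M);      /* next M-bit chunk */
      mpz_fdiv_q_2exp (x, x, M);
      mpz_mul_2exp (A[i], A[i], (unsigned long) i * Mp);
      mpz_mod (A[i], A[i], F);           /* weight: chunk * 2^(i*Mp) mod F */
    }
}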
703 /* op <- n*m mod 2^N+1 with an FFT of size 2^k where N=pl*GMP_NUMB_BITS
706 T must have space for 2 * (nprime + 1) limbs.
712 mp_size_t nprime, mp_size_t l, mp_size_t Mp,
722 mpn_fft_fft (Ap, K, fft_l + k, 2 * Mp, nprime, 1, T);
724 mpn_fft_fft (Bp, K, fft_l + k, 2 * Mp, nprime, 1, T);
727 mpn_fft_mul_modF_K (Ap, sqr ? Ap : Bp, nprime, K);
730 mpn_fft_fftinv (Ap, K, 2 * Mp, nprime, T);
733 Bp[0] = T + nprime + 1;
734 mpn_fft_div_2exp_modF (Bp[0], Ap[0], k, nprime);
738 mpn_fft_div_2exp_modF (Bp[i], Ap[i], k + (K - i) * Mp, nprime);
742 MPN_ZERO (T, nprime + 1);
743 pla = l * (K - 1) + nprime + 1; /* number of required limbs for p */
744 p = B; /* B has K*(n' + 1) limbs, which is >= pla, i.e. enough */
747 for (i = K - 1, lo = l * i + nprime, sh = l * i; i >= 0; i--, lo -= l, sh -= l)
749 mp_ptr n = p + sh;
753 if (mpn_add_n (n, n, Bp[j], nprime + 1))
754 cc += mpn_add_1 (n + nprime + 1, n + nprime + 1,
755 pla - sh - nprime - 1, CNST_LIMB(1));
757 if (mpn_cmp (Bp[j], T, nprime + 1) > 0)
759 cc -= mpn_sub_1 (n, n, pla - sh, CNST_LIMB(1));
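Ignoring the negative-coefficient correction (the mpn_cmp branch above) and the explicit carry handling, the loop computes a plain overlap-add of the coefficients, with l_bits = l*GMP_NUMB_BITS; mpz sketch:

static void
recompose_ref (mpz_t p, mpz_t *c, int K, unsigned long l_bits)
{
  mpz_t t;
  int i;
  mpz_init (t);
  mpz_set_ui (p, 0);
  for (i = 0; i < K; i++)
    {
      mpz_mul_2exp (t, c[i], (unsigned long) i * l_bits);
      mpz_add (p, p, t);      /* coefficients overlap; carries propagate */
    }
  mpz_clear (t);
}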
811 mp_srcptr n, mp_size_t nl,
816 mp_size_t N, Nprime, nprime, M, Mp, l;
819 int sqr = (n == m && nl == ml);
823 TRACE (printf ("\nmpn_mul_fft pl=%ld nl=%ld ml=%ld k=%d\n", pl, nl, ml, k));
839 nprime = Nprime / GMP_NUMB_BITS;
840 TRACE (printf ("N=%ld K=%d, M=%ld, l=%ld, maxLK=%d, Np=%ld, np=%ld\n",
841 N, K, M, l, maxLK, Nprime, nprime));
842 /* we should ensure that recursively, nprime is a multiple of the next K */
843 if (nprime >= (sqr ? SQR_FFT_MODF_THRESHOLD : MUL_FFT_MODF_THRESHOLD))
848 K2 = 1L << mpn_fft_best_k (nprime, sqr);
849 if ((nprime & (K2 - 1)) == 0)
851 nprime = (nprime + K2 - 1) & -K2;
852 Nprime = nprime * GMP_LIMB_BITS;
853 /* warning: since nprime changed, K2 may change too! */
855 TRACE (printf ("new maxLK=%d, Np=%ld, np=%ld\n", maxLK, Nprime, nprime));
857 ASSERT_ALWAYS (nprime < pl); /* otherwise we'll loop */
859 T = TMP_ALLOC_LIMBS (2 * (nprime + 1));
862 TRACE (printf ("%ldx%ld limbs -> %d times %ldx%ld limbs (%1.2f)\n",
863 pl, pl, K, nprime, nprime, 2.0 * (double) N / Nprime / K);
864 printf (" temp space %ld\n", 2 * K * (nprime + 1)));
866 A = TMP_ALLOC_LIMBS (K * (nprime + 1));
868 mpn_mul_fft_decompose (A, Ap, K, nprime, n, nl, l, Mp, T);
872 pla = l * (K - 1) + nprime + 1; /* number of required limbs for p */
878 B = TMP_ALLOC_LIMBS (K * (nprime + 1));
880 mpn_mul_fft_decompose (B, Bp, K, nprime, m, ml, l, Mp, T);
882 h = mpn_mul_fft_internal (op, pl, k, Ap, Bp, A, B, nprime, l, Mp, fft_l, T, sqr);
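How callers inside GMP drive this entry point; the three prototypes live in the internal gmp-impl.h, so this is a sketch of the calling convention, not public API. pl is rounded in place so the caller learns the actual modulus size; op needs room for the rounded pl limbs plus the returned carry:

static int
mulmod_2expp1_sketch (mp_ptr op, mp_size_t *pl,
                      mp_srcptr xp, mp_size_t xn,
                      mp_srcptr yp, mp_size_t yn)
{
  int k = mpn_fft_best_k (*pl, 0);      /* sqr=0: multiply, sqr=1: square */
  *pl = mpn_fft_next_size (*pl, k);     /* pl must be a multiple of 2^k */
  /* {op,pl} + carry*2^(pl*GMP_NUMB_BITS) == {xp,xn}*{yp,yn}
     mod 2^(pl*GMP_NUMB_BITS)+1 */
  return mpn_mul_fft (op, *pl, xp, xn, yp, yn, k);
}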
889 /* multiply {n, nl} by {m, ml}, and put the result in {op, nl+ml} */
892 mp_srcptr n, mp_size_t nl,
898 int sqr = (n == m && nl == ml);
901 pl = nl + ml; /* total number of limbs of the result */
908 We need consecutive intervals to overlap, i.e. 5*j >= 3*(j+1),
925 TRACE (printf ("mpn_mul_fft_full nl=%ld ml=%ld -> pl2=%ld pl3=%ld k=%d\n",
926 nl, ml, pl2, pl3, k2));
929 cc = mpn_mul_fft (op, pl3, n, nl, m, ml, k3); /* mu */
932 cc = mpn_mul_fft (pad_op, pl2, n, nl, m, ml, k2); /* lambda */
943 /* now lambda-mu = {pad_op, pl2} - cc mod 2^(pl2*GMP_NUMB_BITS)+1 */
964 /* first normalize {pad_op, pl2} before dividing by 2: c2 is the borrow
971 /* now -1 <= cc <= 0 */
974 /* now {pad_op, pl2} is normalized, with 0 <= cc <= 1 */
977 /* now 0 <= cc <= 2, but cc=2 cannot occur since it would give a carry
982 /* now {pad_op,pl2}-cc = (lambda-mu)/(1-2^(l*GMP_NUMB_BITS))
984 c2 = mpn_add_n (op, op, pad_op, pl2); /* no need to add cc (is 0) */
985 /* since pl2+pl3 >= pl, necessarily the extra limbs (including cc) are zero */
989 /* since the final result has at most pl limbs, no carry out below */
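Why the two residues determine the product: an mpz illustration of generic CRT, not GMP's code (the steps above avoid the modular inverse via the explicit (lambda-mu)/(1-2^(l*GMP_NUMB_BITS)) formula). It assumes F2 = 2^(pl2*GMP_NUMB_BITS)+1 and F3 = 2^(pl3*GMP_NUMB_BITS)+1 are coprime and that F2*F3 exceeds the product:

static void
crt_recover_ref (mpz_t x, const mpz_t lambda, const mpz_t mu,
                 const mpz_t F2, const mpz_t F3)
{
  mpz_t q, inv;
  mpz_inits (q, inv, NULL);
  mpz_sub (q, mu, lambda);
  mpz_invert (inv, F2, F3);    /* exists when gcd(F2,F3) = 1 */
  mpz_mul (q, q, inv);
  mpz_mod (q, q, F3);          /* q = (mu - lambda)/F2 mod F3 */
  mpz_mul (x, q, F2);
  mpz_add (x, x, lambda);      /* x == lambda (mod F2), x == mu (mod F3) */
  mpz_clears (q, inv, NULL);
}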