Lines matching refs:cc (all listed occurrences of the carry variable cc)

208   mp_limb_t cc, rd;
224 cc = mpn_lshiftc (r + m, a, n - m, sh);
231 cc = 0;
234 /* add cc to r[0], and add rd to r[m] */
239 /* cc < 2^sh <= 2^(GMP_NUMB_BITS-1) thus no overflow here */
240 cc++;
241 mpn_incr_u (r, cc);
245 cc = (rd == 0) ? 1 : rd;
247 mpn_incr_u (r, cc);
259 cc = mpn_lshift (r + m, a, n - m, sh); /* {r+m, n-m} = {a, n-m}<<sh */
267 cc = 0;
270 /* now complement {r, m}, subtract cc from r[0], subtract rd from r[m] */
276 if (cc-- == 0) /* then add 1 to r[0] */
277 cc = mpn_add_1 (r, r, n, CNST_LIMB(1));
278 cc = mpn_sub_1 (r, r, m, cc) + 1;
279 /* add 1 to cc instead of rd since rd might overflow */
282 /* now subtract cc and rd from r[m..n] */
284 r[n] = -mpn_sub_1 (r + m, r + m, n - m, cc);
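
In both branches above (file lines 224-247 and 259-284, which appear to come from GMP's FFT multiplication code in mpn/generic/mul_fft.c), cc holds the limb shifted out by mpn_lshiftc/mpn_lshift and is then added to, or subtracted from, another part of r as required by the arithmetic mod 2^(n*GMP_NUMB_BITS)+1. The standalone sketch below shows only the plain, non-modular version of that bookkeeping: the limb returned by mpn_lshift is the part of the operand that no longer fits and has to be accounted for somewhere. (Each sketch in this listing builds with e.g. gcc file.c -lgmp.)

  /* Standalone illustration, not GMP's internal code: mpn_lshift returns the
     bits pushed out at the top, and they must be accounted for elsewhere,
     exactly like cc in the lines above. */
  #include <stdio.h>
  #include <gmp.h>

  int main (void)
  {
    mp_limb_t a[2] = { ~(mp_limb_t) 0, 1 };   /* arbitrary 2-limb operand */
    mp_limb_t r[3];
    mp_limb_t cc;
    unsigned int sh = 4;                      /* 1 <= sh <= GMP_NUMB_BITS - 1 */

    cc = mpn_lshift (r, a, 2, sh);    /* {r,2} = {a,2} << sh, cc = bits shifted out */
    r[2] = cc;                        /* plain shift: the lost bits go one limb up;  */
                                      /* in the modular code above they are added to */
                                      /* (or subtracted from) r[0] / r[m] instead    */
    gmp_printf ("a << %u = %Nx\n", sh, r, (mp_size_t) 3);
    return 0;
  }
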
535 mp_limb_t cc;
549 cc = mpn_add_n (tpn, tpn, b, n);
551 cc = 0;
553 cc += mpn_add_n (tpn, tpn, a, n) + a[n];
554 if (cc != 0)
556 cc = mpn_add_1 (tp, tp, n2, cc);
557 /* If mpn_add_1 gives a carry (cc != 0),
561 tp[0] += cc;
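
Here (file lines 535-561) cc apparently collects contributions that belong at weight B^(2n) of the 2n-limb buffer tp, with B = 2^GMP_NUMB_BITS. Since B^(2n) = (B^n)^2 = 1 mod B^n + 1, they are simply added back at tp[0] via mpn_add_1 over n2 limbs, and a carry out of that addition wraps the same way (tp[0] += cc). A toy demonstration of the wrap-around, not the GMP routine:

  /* Toy demo: working mod B^N + 1, a carry out of a 2N-limb buffer has
     weight B^(2N) = 1, so it is folded back at limb 0. */
  #include <stdio.h>
  #include <gmp.h>

  #define N  3
  #define N2 (2 * N)

  int main (void)
  {
    mp_limb_t tp[N2];
    mp_limb_t cc;
    mp_size_t i;

    for (i = 0; i < N2; i++)
      tp[i] = ~(mp_limb_t) 0;          /* all ones, so the next addition carries out */

    cc = mpn_add_1 (tp, tp, N2, 2);    /* tp becomes 1 and cc = 1 */
    tp[0] += cc;                       /* B^(2N) = 1 (mod B^N + 1): fold the carry back */

    /* prints 2: (B^(2N)-1) + 2 = B^(2N)+1 = 2 (mod B^N + 1) */
    gmp_printf ("tp = %Nx\n", tp, (mp_size_t) N2);
    return 0;
  }
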
640 mp_limb_t cc;
648 cc = mpn_add_n (rp, ap, ap + 2 * n, m);
650 rpn = mpn_add_1 (rp + m, ap + m, n - m, cc);
660 cc = mpn_sub_n (rp, rp, ap + n, l);
661 rpn -= mpn_sub_1 (rp + l, rp + l, n - l, cc);
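
File lines 640-661 normalize an operand wider than n limbs modulo B^n + 1 (B = 2^GMP_NUMB_BITS): the part at weight B^(2n) is added back (B^(2n) = 1) and the part at weight B^n is subtracted (B^n = -1), with cc ferrying the carry or borrow from each mpn_add_n/mpn_sub_n call into the following mpn_add_1/mpn_sub_1. A simplified standalone sketch for an operand of exactly 2n limbs:

  /* Simplified sketch (2N limbs only, not the GMP routine): reduce t mod
     B^N + 1 using B^N = -1, i.e. t = low(t) - high(t); a borrow at weight
     B^N is worth +1 and the result is kept on N+1 limbs. */
  #include <stdio.h>
  #include <gmp.h>

  #define N 4

  int main (void)
  {
    mp_limb_t t[2 * N], r[N + 1];
    mp_limb_t cc;
    mp_size_t i;

    for (i = 0; i < 2 * N; i++)
      t[i] = (mp_limb_t) (i * 0x12345u + 0x6789u);   /* arbitrary test data */

    cc = mpn_sub_n (r, t, t + N, N);      /* low(t) - high(t), cc = borrow */
    r[N] = mpn_add_1 (r, r, N, cc);       /* -B^N = +1: re-add the borrow, keep any carry */

    gmp_printf ("r = %Nx\n", r, (mp_size_t) (N + 1));
    return 0;
  }
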
764 mp_limb_t cc;
793 cc = 0; /* will accumulate the (signed) carry at p[pla] */
801 cc += mpn_add_1 (n + nprime + 1, n + nprime + 1,
806 cc -= mpn_sub_1 (n, n, pla - sh, CNST_LIMB(1));
807 cc -= mpn_sub_1 (p + lo, p + lo, pla - lo, CNST_LIMB(1));
810 if (cc == -CNST_LIMB(1))
812 if ((cc = mpn_add_1 (p + pla - pl, p + pla - pl, pl, CNST_LIMB(1))))
819 else if (cc == 1)
823 while ((cc = mpn_add_1 (p + pla - 2 * pl, p + pla - 2 * pl, 2 * pl, cc)))
828 cc = mpn_sub_1 (p + pla - pl, p + pla - pl, pl, cc);
829 ASSERT (cc == 0);
833 ASSERT (cc == 0);
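
In file lines 793-833 cc is an mp_limb_t used with signed semantics: carries from mpn_add_1 are added to it, borrows from mpn_sub_1 subtracted, and the net -1, 0 or +1 (stored modulo 2^GMP_NUMB_BITS, hence the test against -CNST_LIMB(1)) is propagated in a single pass at the end. A toy version of that bookkeeping, not GMP's code:

  /* Toy version: cc lives in an unsigned limb, carries are added, borrows
     subtracted, and the net -1/0/+1 is propagated once at the end. */
  #include <stdio.h>
  #include <gmp.h>

  #define PL 6

  int main (void)
  {
    mp_limb_t p[PL] = { 0, 0, 0, 0, 1, 0 };  /* low limbs zero, to force a borrow */
    mp_limb_t cc = 0;

    cc += mpn_add_1 (p, p, 3, 5);            /* + 5 on the low 3 limbs: no carry        */
    cc -= mpn_sub_1 (p, p, 3, 7);            /* - 7 on the low 3 limbs: borrow, cc = -1 */

    if (cc == - (mp_limb_t) 1)               /* the case written cc == -CNST_LIMB(1) above */
      cc = mpn_sub_1 (p + 3, p + 3, PL - 3, 1);  /* propagate the net -1 upward, once */

    printf ("remaining borrow: %lu (expected 0)\n", (unsigned long) cc);
    gmp_printf ("p = %Nx\n", p, (mp_size_t) PL);
    return 0;
  }

Keeping the carry signed but stored in an unsigned limb avoids a separate sign flag; only the comparisons need to know that an all-ones limb means -1.
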
950 mp_size_t cc, c2, oldcc;
982 cc = mpn_mul_fft (op, pl3, n, nl, m, ml, k3); /* mu */
983 ASSERT(cc == 0);
985 cc = mpn_mul_fft (pad_op, pl2, n, nl, m, ml, k2); /* lambda */
986 cc = -cc + mpn_sub_n (pad_op, pad_op, op, pl2); /* lambda - low(mu) */
987 /* 0 <= cc <= 1 */
988 ASSERT(0 <= cc && cc <= 1);
991 cc = mpn_add_1 (pad_op + l, pad_op + l, l, (mp_limb_t) c2) - cc;
992 ASSERT(-1 <= cc && cc <= 1);
993 if (cc < 0)
994 cc = mpn_add_1 (pad_op, pad_op, pl2, (mp_limb_t) -cc);
995 ASSERT(0 <= cc && cc <= 1);
996 /* now lambda-mu = {pad_op, pl2} - cc mod 2^(pl2*GMP_NUMB_BITS)+1 */
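
From file line 950 onwards cc is a signed mp_size_t, so a borrow can be held as -1 and tested with cc < 0; because the limb just above an n-limb residue has weight B^n = -1 (mod B^n + 1), a negative cc is repaired by adding -cc at limb 0, as lines 993-994 do. A standalone sketch of that repair, not GMP's code:

  /* Sketch: a borrow from mpn_sub_n counts as -1 in a signed mp_size_t and
     is repaired by adding -cc at limb 0, since B^N = -1 (mod B^N + 1). */
  #include <assert.h>
  #include <stdio.h>
  #include <gmp.h>

  #define N 4

  int main (void)
  {
    mp_limb_t x[N] = { 7, 0, 0, 0 };
    mp_limb_t y[N] = { 9, 0, 0, 0 };
    mp_size_t cc;

    cc = - (mp_size_t) mpn_sub_n (x, x, y, N);   /* x -= y; a borrow counts as -1 */
    assert (-1 <= cc && cc <= 1);

    if (cc < 0)
      cc = mpn_add_1 (x, x, N, (mp_limb_t) -cc); /* -1 at weight B^N is +1 at limb 0 */
    assert (0 <= cc && cc <= 1);

    gmp_printf ("x = %Nx\n", x, (mp_size_t) N);  /* 7 - 9 = -2 = B^N - 1 (mod B^N + 1) */
    return 0;
  }
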
997 oldcc = cc;
1000 cc += c2 >> 1; /* carry out from high <- low + high */
1011 cc += mpn_add_n (pad_op + l, tmp, pad_op + l, l);
1017 at pad_op + l, cc is the carry at pad_op + pl2 */
1018 /* 0 <= cc <= 2 */
1019 cc -= mpn_sub_1 (pad_op + l, pad_op + l, l, (mp_limb_t) c2);
1020 /* -1 <= cc <= 2 */
1021 if (cc > 0)
1022 cc = -mpn_sub_1 (pad_op, pad_op, pl2, (mp_limb_t) cc);
1023 /* now -1 <= cc <= 0 */
1024 if (cc < 0)
1025 cc = mpn_add_1 (pad_op, pad_op, pl2, (mp_limb_t) -cc);
1026 /* now {pad_op, pl2} is normalized, with 0 <= cc <= 1 */
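
Lines 1021-1025 handle the opposite sign: a positive cc at weight B^n is subtracted at limb 0 (again B^n = -1 mod B^n + 1), and if that subtraction borrows, the borrow is worth +1 and is added back, leaving 0 <= cc <= 1 as the comment on line 1026 says. A sketch of this two-step normalization, illustrative only:

  /* Sketch of the complementary case: a positive cc at weight B^N is
     subtracted at limb 0; a resulting borrow is worth +1 and is re-added. */
  #include <stdio.h>
  #include <gmp.h>

  #define N 4

  int main (void)
  {
    mp_limb_t x[N] = { 0, 0, 0, 0 };   /* value represented is x + cc*B^N */
    mp_size_t cc = 2;                  /* pretend a carry of 2 piled up at the top */

    if (cc > 0)                        /* +cc at weight B^N is -cc at limb 0 */
      cc = - (mp_size_t) mpn_sub_1 (x, x, N, (mp_limb_t) cc);
    /* now -1 <= cc <= 0 */
    if (cc < 0)                        /* a borrow at weight B^N is worth +1 */
      cc = mpn_add_1 (x, x, N, (mp_limb_t) -cc);
    /* now x is normalized and 0 <= cc <= 1 (cc = 1 only if the residue is exactly B^N) */

    printf ("cc = %ld\n", (long) cc);              /* 0 here */
    gmp_printf ("x  = %Nx\n", x, (mp_size_t) N);   /* all ones: 2*B^N = -2 = B^N - 1 */
    return 0;
  }
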
1028 cc += 1 + mpn_add_1 (pad_op, pad_op, pl2, CNST_LIMB(1));
1029 /* now 0 <= cc <= 2, but cc=2 cannot occur since it would give a carry
1032 if (cc) /* then cc=1 */
1034 /* now {pad_op,pl2}-cc = (lambda-mu)/(1-2^(l*GMP_NUMB_BITS))
1036 c2 = mpn_add_n (op, op, pad_op, pl2); /* no need to add cc (is 0) */
1037 /* since pl2+pl3 >= pl, necessarily the extra limbs (including cc) are zero */
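
None of this cc bookkeeping leaks out of the FFT code: mpn_mul_fft and mpn_mul_fft_full are GMP internals, and at the public mpn level the only carry-like value a caller ever sees is the most significant limb returned by mpn_mul. A minimal caller, for contrast:

  /* Minimal use of the public API; the returned "carry" is just the most
     significant limb of the product (possibly zero). */
  #include <stdio.h>
  #include <gmp.h>

  int main (void)
  {
    mp_limb_t a[2] = { 123456789, 42 };
    mp_limb_t b[1] = { 987654321 };
    mp_limb_t r[3];                    /* product needs 2 + 1 limbs */
    mp_limb_t high;

    high = mpn_mul (r, a, 2, b, 1);    /* returns r[2], the most significant limb */
    gmp_printf ("product = %Nx (high limb %Mx)\n", r, (mp_size_t) 3, high);
    return 0;
  }

mpn_mul requires the first operand to be at least as long as the second and the destination to have room for s1n + s2n limbs.
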