Lines Matching refs:result

233 /* Computes result = in << shift, returning carry. Can modify in place
234 * (if result == in). 0 < shift < 64.
236 static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
245 result[i] = (temp << shift) | carry;
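
The shift loop these matches come from can be reconstructed as below; a minimal sketch assuming the kernel's u64 type and little-endian digit order (least significant digit first), not a verbatim copy of the file:

    static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
                          unsigned int ndigits)
    {
            u64 carry = 0;
            unsigned int i;

            for (i = 0; i < ndigits; i++) {
                    u64 temp = in[i];

                    /* Bits shifted out of this digit feed the next one up. */
                    result[i] = (temp << shift) | carry;
                    carry = temp >> (64 - shift);
            }

            return carry;   /* bits shifted out of the top digit */
    }

The 0 < shift < 64 precondition matters: temp >> (64 - shift) would be undefined behavior for shift == 0.
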
267 /* Computes result = left + right, returning carry. Can modify in place. */
268 static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
281 result[i] = sum;
287 /* Computes result = left + right (right is a single u64 digit), returning carry. Can modify in place. */
288 static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
303 result[i] = sum;
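
Both adders propagate a carry using the unsigned-wraparound test visible in the matches above. A sketch of vli_add under the same assumptions (vli_uadd is the same loop with the scalar right consumed at digit 0):

    static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
                       unsigned int ndigits)
    {
            u64 carry = 0;
            unsigned int i;

            for (i = 0; i < ndigits; i++) {
                    u64 sum = left[i] + right[i] + carry;

                    /* sum == left[i] means right[i] + carry either wrapped
                     * to exactly 0 (keep the incoming carry) or was 0
                     * (carry is already 0), so only update it otherwise. */
                    if (sum != left[i])
                            carry = (sum < left[i]);

                    result[i] = sum;
            }

            return carry;
    }
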
309 /* Computes result = left - right, returning borrow. Can modify in place. */
310 u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
323 result[i] = diff;
330 /* Computes result = left - right (right is a single u64 digit), returning borrow. Can modify in place. */
331 static u64 vli_usub(u64 *result, const u64 *left, u64 right,
344 result[i] = diff;
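
Subtraction mirrors this with a borrow; a sketch under the same assumptions:

    static u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
                       unsigned int ndigits)
    {
            u64 borrow = 0;
            unsigned int i;

            for (i = 0; i < ndigits; i++) {
                    u64 diff = left[i] - right[i] - borrow;

                    /* diff > left[i] can only happen via wraparound,
                     * i.e. when the digit subtraction borrowed. */
                    if (diff != left[i])
                            borrow = (diff > left[i]);

                    result[i] = diff;
            }

            return borrow;
    }
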
352 uint128_t result;
356 result.m_low = m;
357 result.m_high = m >> 64;
375 result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
376 result.m_high = m3 + (m2 >> 32);
378 return result;
383 uint128_t result;
385 result.m_low = a.m_low + b.m_low;
386 result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
388 return result;
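
The two uint128_t builders matched above are the portable 64x64->128 multiply and the 128-bit add. A sketch of the no-native-128-bit path, splitting each operand into 32-bit halves (the m0..m3 partial products seen in the matches), plus the add with its wraparound carry test; types assumed from the kernel context:

    typedef struct {
            u64 m_low;
            u64 m_high;
    } uint128_t;

    /* left = a1 * 2^32 + a0, right = b1 * 2^32 + b0; combine the four
     * 32x32 partial products into a 128-bit result. */
    static uint128_t mul_64_64(u64 left, u64 right)
    {
            u64 a0 = left & 0xffffffffull;
            u64 a1 = left >> 32;
            u64 b0 = right & 0xffffffffull;
            u64 b1 = right >> 32;
            u64 m0 = a0 * b0;
            u64 m1 = a0 * b1;
            u64 m2 = a1 * b0;
            u64 m3 = a1 * b1;
            uint128_t result;

            m2 += (m0 >> 32);
            m2 += m1;
            if (m2 < m1)            /* overflow of m1 + m2 lands in m3 */
                    m3 += 0x100000000ull;

            result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
            result.m_high = m3 + (m2 >> 32);
            return result;
    }

    /* 128-bit add; the low halves wrap iff their sum is smaller than
     * either operand, which supplies the carry into m_high. */
    static uint128_t add_128_128(uint128_t a, uint128_t b)
    {
            uint128_t result;

            result.m_low = a.m_low + b.m_low;
            result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
            return result;
    }
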
391 static void vli_mult(u64 *result, const u64 *left, const u64 *right,
398 /* Compute each digit of result in sequence, maintaining the carries.
418 result[k] = r01.m_low;
424 result[ndigits * 2 - 1] = r01.m_low;
428 static void vli_umult(u64 *result, const u64 *left, u32 right,
440 result[k] = r01.m_low;
444 result[k] = r01.m_low;
446 result[k] = 0;
449 static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
479 result[k] = r01.m_low;
485 result[ndigits * 2 - 1] = r01.m_low;
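
vli_mult and vli_square compute the 2N-digit product one output column at a time, accumulating the 64x64 partial products for column k in a 128-bit accumulator r01 plus a third overflow digit r2. A sketch consistent with the matched emit lines, using the two 128-bit helpers sketched above (vli_square halves the work by doubling off-diagonal products; vli_umult is the single-digit special case):

    static void vli_mult(u64 *result, const u64 *left, const u64 *right,
                         unsigned int ndigits)
    {
            uint128_t r01 = { 0, 0 };
            u64 r2 = 0;
            unsigned int i, k;

            for (k = 0; k < ndigits * 2 - 1; k++) {
                    unsigned int min = (k < ndigits) ? 0 : (k + 1) - ndigits;

                    /* Column k sums left[i] * right[k - i] over valid i. */
                    for (i = min; i <= k && i < ndigits; i++) {
                            uint128_t product = mul_64_64(left[i], right[k - i]);

                            r01 = add_128_128(r01, product);
                            r2 += (r01.m_high < product.m_high);
                    }

                    result[k] = r01.m_low;
                    r01.m_low = r01.m_high;   /* shift accumulator one digit */
                    r01.m_high = r2;
                    r2 = 0;
            }

            result[ndigits * 2 - 1] = r01.m_low;
    }
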
488 /* Computes result = (left + right) % mod.
489 * Assumes that left < mod, right < mod, and result != mod.
491 static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
496 carry = vli_add(result, left, right, ndigits);
498 /* result > mod (result = mod + remainder), so subtract mod to get the remainder.
501 if (carry || vli_cmp(result, mod, ndigits) >= 0)
502 vli_sub(result, result, mod, ndigits);
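
Because left and right are each below mod, the raw sum is below 2 * mod, so one conditional subtraction suffices. One-digit example with mod = 7: 5 + 4 = 9 >= 7, so the result is 9 - 7 = 2; the carry test covers the case where the sum wrapped past the top digit, where the comparison alone would be wrong.
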
505 /* Computes result = (left - right) % mod.
506 * Assumes that left < mod, right < mod, and result != mod.
508 static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
511 u64 borrow = vli_sub(result, left, right, ndigits);
514 * Since -x % d == d - x (for 0 < x < d), we can get the correct
515 * result from result + mod (with overflow).
518 vli_add(result, result, mod, ndigits);
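
When the subtraction borrows, result holds left - right + 2^(64*ndigits) after wraparound; adding mod (and discarding the overflow) turns this into left - right + mod, the correct residue. One-digit example with mod = 7: 2 - 5 wraps to 2^64 - 3, and adding 7 wraps again to 4, which is (2 - 5) mod 7.
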
522 * Computes result = product % mod
530 static void vli_mmod_special(u64 *result, const u64 *product,
547 vli_set(result, r, ndigits);
551 * Computes result = product % mod
564 static void vli_mmod_special2(u64 *result, const u64 *product,
605 vli_set(result, r, ndigits);
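
Both special-form paths rest on the same folding identity: for p = 2^k - c, we have 2^k ≡ c (mod p), so a product H * 2^k + L reduces to H * c + L, shrinking it by nearly k bits per pass. A runnable one-word analogue with the Mersenne prime 2^61 - 1 (chosen purely for illustration; it is not one of the kernel's curve primes):

    #include <stdint.h>

    /* Folding demo with p = 2^61 - 1: since 2^61 ≡ 1 (mod p), the high
     * bits fold down as x = (x >> 61) + (x & p); one conditional
     * subtraction finishes because the folded value is below 2p. */
    static uint64_t mod_mersenne61(uint64_t x)
    {
            const uint64_t p = ((uint64_t)1 << 61) - 1;

            x = (x >> 61) + (x & p);
            if (x >= p)
                    x -= p;
            return x;
    }
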
609 * Computes result = product % mod, where product is 2N words long.
613 static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
651 vli_set(result, v[i], ndigits);
654 /* Computes result = product % mod using Barrett's reduction with a precomputed value mu = floor(2^(2w) / mod) stored after the mod digits.
663 static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
682 vli_set(result, r, ndigits);
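
Barrett's reduction trades the division in product % mod for two multiplications by a precomputed reciprocal mu = floor(2^(2w) / mod). A word-sized analogue (names and widths chosen for illustration; the kernel routine does the same with multi-digit values and mu stored after the modulus):

    #include <stdint.h>

    /* Reduce a 64-bit product modulo a 32-bit m whose top bit is set,
     * using mu = floor(2^64 / m). For any m that is not a power of two,
     * mu can be computed as UINT64_MAX / m. __uint128_t is a GCC/Clang
     * extension. */
    static uint32_t barrett_reduce(uint64_t product, uint32_t m, uint64_t mu)
    {
            /* q approximates product / m from below, off by at most a few. */
            uint64_t q = (uint64_t)(((__uint128_t)(product >> 32) * mu) >> 32);
            uint64_t r = product - q * m;

            while (r >= m)
                    r -= m;
            return (uint32_t)r;
    }
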
689 static void vli_mmod_fast_192(u64 *result, const u64 *product,
695 vli_set(result, product, ndigits);
698 carry = vli_add(result, result, tmp, ndigits);
703 carry += vli_add(result, result, tmp, ndigits);
707 carry += vli_add(result, result, tmp, ndigits);
709 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
710 carry -= vli_sub(result, result, curve_prime, ndigits);
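
The three adds above are the NIST fast reduction for p192 = 2^192 - 2^64 - 1. Since 2^192 ≡ 2^64 + 1 (mod p192), each high digit of the 6-digit product (c5,c4,c3,c2,c1,c0) folds into two low positions:

    c3 * 2^192 ≡ c3 * 2^64 + c3
    c4 * 2^256 ≡ c4 * 2^128 + c4 * 2^64
    c5 * 2^320 ≡ c5 * 2^128 + c5 * 2^64 + c5

so result ≡ (c2,c1,c0) + (0,c3,c3) + (c4,c4,0) + (c5,c5,c5), and the trailing while loop performs the final conditional subtractions.
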
713 /* Computes result = product % curve_prime
716 static void vli_mmod_fast_256(u64 *result, const u64 *product,
723 vli_set(result, product, ndigits);
731 carry += vli_add(result, result, tmp, ndigits);
738 carry += vli_add(result, result, tmp, ndigits);
745 carry += vli_add(result, result, tmp, ndigits);
752 carry += vli_add(result, result, tmp, ndigits);
759 carry -= vli_sub(result, result, tmp, ndigits);
766 carry -= vli_sub(result, result, tmp, ndigits);
773 carry -= vli_sub(result, result, tmp, ndigits);
780 carry -= vli_sub(result, result, tmp, ndigits);
784 carry += vli_add(result, result, curve_prime, ndigits);
787 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
788 carry -= vli_sub(result, result, curve_prime, ndigits);
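
Same idea for p256 = 2^256 - 2^224 + 2^192 + 2^96 - 1: the NIST reduction writes the 512-bit product as s1 + 2*s2 + 2*s3 + s4 + s5 - d1 - d2 - d3 - d4 over nine terms built from 32-bit slices of the product. The vli_set above seeds result with s1, the four vli_add calls fold in s2..s5 (the doubling shifts of s2 and s3 touch only tmp, so they do not appear among these matches), the four vli_sub calls remove d1..d4, and the vli_add of curve_prime repairs a negative running total before the final conditional subtractions.
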
796 /* Computes result = product % curve_prime
799 static void vli_mmod_fast_384(u64 *result, const u64 *product,
806 vli_set(result, product, ndigits);
816 carry += vli_add(result, result, tmp, ndigits);
825 carry += vli_add(result, result, tmp, ndigits);
834 carry += vli_add(result, result, tmp, ndigits);
843 carry += vli_add(result, result, tmp, ndigits);
852 carry += vli_add(result, result, tmp, ndigits);
861 carry += vli_add(result, result, tmp, ndigits);
870 carry -= vli_sub(result, result, tmp, ndigits);
879 carry -= vli_sub(result, result, tmp, ndigits);
888 carry -= vli_sub(result, result, tmp, ndigits);
892 carry += vli_add(result, result, curve_prime, ndigits);
895 while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
896 carry -= vli_sub(result, result, curve_prime, ndigits);
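
Likewise for p384 = 2^384 - 2^128 - 2^96 + 2^32 - 1: the reduction is s1 + 2*s2 + s3 + s4 + s5 + s6 + s7 - d1 - d2 - d3 over terms sliced from the 768-bit product. result seeds with s1, the six adds and three subtracts above fold in the remaining terms, and the same fix-up tail runs.
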
905 /* Computes result = product % curve_prime for different curve_primes.
910 static bool vli_mmod_fast(u64 *result, u64 *product,
921 vli_mmod_special(result, product, curve_prime,
926 vli_mmod_special2(result, product, curve_prime,
930 vli_mmod_barrett(result, product, curve_prime, ndigits);
936 vli_mmod_fast_192(result, product, curve_prime, tmp);
939 vli_mmod_fast_256(result, product, curve_prime, tmp);
942 vli_mmod_fast_384(result, product, curve_prime, tmp);
952 /* Computes result = (left * right) % mod.
955 void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
961 vli_mmod_slow(result, product, mod, ndigits);
965 /* Computes result = (left * right) % curve_prime. */
966 static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
972 vli_mmod_fast(result, product, curve);
975 /* Computes result = left^2 % curve_prime. */
976 static void vli_mod_square_fast(u64 *result, const u64 *left,
982 vli_mmod_fast(result, product, curve);
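
All three wrappers share one shape: form the full double-width product, then reduce it. A sketch assuming the kernel's ECC_MAX_DIGITS bound and curve layout (curve->g.ndigits as the digit count):

    static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
                                  const struct ecc_curve *curve)
    {
            u64 product[2 * ECC_MAX_DIGITS];

            vli_mult(product, left, right, curve->g.ndigits);
            vli_mmod_fast(result, product, curve);
    }
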
986 /* Computes result = (1 / input) % mod. All VLIs are the same size.
990 void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
999 vli_clear(result, ndigits);
1061 vli_set(result, u, ndigits);
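
The loop between these matches is a binary extended Euclidean algorithm: it maintains u * input ≡ a and v * input ≡ b (mod mod), halving and subtracting until a == b == 1, at which point u is the inverse. A one-word analogue that is easy to run (assumes mod is odd and below 2^63 and gcd(input, mod) == 1; the multi-digit version handles input == 0 by returning 0, per the vli_clear above):

    #include <stdint.h>

    /* Halve x modulo an odd m without overflow: when x is odd, x + m is
     * even and (x + m) / 2 == x/2 + m/2 + 1 in truncating arithmetic. */
    static uint64_t half_mod(uint64_t x, uint64_t m)
    {
            return (x & 1) ? (x >> 1) + (m >> 1) + 1 : x >> 1;
    }

    static uint64_t mod_inv(uint64_t input, uint64_t mod)
    {
            uint64_t a = input, b = mod, u = 1, v = 0;

            while (a != b) {
                    if (!(a & 1)) {                 /* a even: halve a, u */
                            a >>= 1;
                            u = half_mod(u, mod);
                    } else if (!(b & 1)) {          /* b even: halve b, v */
                            b >>= 1;
                            v = half_mod(v, mod);
                    } else if (a > b) {             /* both odd */
                            a = (a - b) >> 1;
                            u = half_mod(u - v + ((u < v) ? mod : 0), mod);
                    } else {
                            b = (b - a) >> 1;
                            v = half_mod(v - u + ((v < u) ? mod : 0), mod);
                    }
            }
            return u;       /* u * input ≡ 1 (mod mod) */
    }

Example: mod_inv(3, 7) returns 5, since 3 * 5 = 15 ≡ 1 (mod 7).
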
1280 static void ecc_point_mult(struct ecc_point *result,
1335 vli_set(result->x, rx[0], ndigits);
1336 vli_set(result->y, ry[0], ndigits);
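
ecc_point_mult is a Montgomery ladder: rx[0..1]/ry[0..1] are two running points updated on every scalar bit, which is why only index 0 is copied out at the end. The ladder shape, shown on modular exponentiation so it can actually run (a structural analogue only, not the kernel's co-Z point arithmetic; mod must be in (1, 2^32) so the products fit in 64 bits):

    #include <stdint.h>

    static uint64_t ladder_pow(uint64_t base, uint64_t exp, uint64_t mod)
    {
            uint64_t r[2] = { 1, base % mod };
            int i;

            for (i = 63; i >= 0; i--) {
                    int b = (exp >> i) & 1;

                    /* Both registers update every step, whatever b is. */
                    r[1 - b] = (r[1 - b] * r[b]) % mod;     /* "add" */
                    r[b] = (r[b] * r[b]) % mod;             /* "double" */
            }
            return r[0];
    }
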
1340 static void ecc_point_add(const struct ecc_point *result,
1349 vli_set(result->x, q->x, ndigits);
1350 vli_set(result->y, q->y, ndigits);
1351 vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
1354 xycz_add(px, py, result->x, result->y, curve);
1356 apply_z(result->x, result->y, z, curve);
1362 void ecc_point_mult_shamir(const struct ecc_point *result,
1369 u64 *rx = result->x;
1370 u64 *ry = result->y;
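
ecc_point_mult_shamir evaluates u1 * P + u2 * Q in a single double-and-add pass by precomputing P + Q and selecting P, Q, or P + Q per bit pair. The same trick on modular exponentiation (structural analogue only; x, y, and mod below 2^32 so the products fit in 64 bits):

    #include <stdint.h>

    /* Computes (x^a * y^b) % mod in one pass over the bits of a and b. */
    static uint64_t shamir_pow(uint64_t x, uint64_t a, uint64_t y, uint64_t b,
                               uint64_t mod)
    {
            uint64_t xy = (x * y) % mod;    /* precomputed "P + Q" */
            uint64_t r = 1;
            int i;

            for (i = 63; i >= 0; i--) {
                    int bits = (((a >> i) & 1) << 1) | ((b >> i) & 1);

                    r = (r * r) % mod;              /* "double" */
                    if (bits == 1)
                            r = (r * y) % mod;      /* only b set here */
                    else if (bits == 2)
                            r = (r * x) % mod;      /* only a set here */
                    else if (bits == 3)
                            r = (r * xy) % mod;     /* both: use P + Q */
            }
            return r;
    }
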