Lines Matching defs:C1_lo

470   UINT64 C1_lo, C2_lo, tmp_signif_lo;
471 // Note: C1.w[1], C1.w[0] represent C1_hi, C1_lo (all UINT64)
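
Throughout this listing the 34-digit decimal128 coefficient is carried as a pair of 64-bit halves, C1_hi:C1_lo, mirroring the two words of C1 as the note at line 471 says. A minimal sketch of that assumed layout (stdint types stand in for the library's UINT64/UINT128; the pack/unpack helpers are illustrative, not library functions):

    #include <stdint.h>

    typedef uint64_t UINT64;
    typedef struct { UINT64 w[2]; } UINT128;   /* w[0] = low 64 bits, w[1] = high 64 bits (assumed) */

    /* split the coefficient the way the listing does: C1_hi = C1.w[1], C1_lo = C1.w[0] */
    static void unpack_signif (UINT128 C1, UINT64 *C1_hi, UINT64 *C1_lo) {
      *C1_hi = C1.w[1];
      *C1_lo = C1.w[0];
    }

    /* rebuild the two-word coefficient from its halves */
    static UINT128 pack_signif (UINT64 C1_hi, UINT64 C1_lo) {
      UINT128 C1;
      C1.w[1] = C1_hi;
      C1.w[0] = C1_lo;
      return C1;
    }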
578 C1_lo = x.w[0];
591 C1_lo = 0; // significand low
596 && C1_lo > 0x378d8e63ffffffffull)) {
599 C1_lo = 0;
627 if ((C1_hi == 0x0ull) && (C1_lo == 0x0ull)) {
725 if (C1_hi == 0) { // x_bits is the nr. of bits in C1_lo
726 if (C1_lo >= 0x0020000000000000ull) { // x >= 2^53
729 if (C1_lo >= 0x0000000100000000ull) { // x >= 2^32
730 tmp1.d = (double) (C1_lo >> 32); // exact conversion
735 tmp1.d = (double) (C1_lo); // exact conversion
740 tmp1.d = (double) C1_lo; // exact conversion
754 C1_lo >= nr_digits[x_nr_bits].threshold_lo))
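
Lines 725-754 (repeated at 805-833) estimate how many decimal digits the coefficient has: the bit length of C1_lo is read off an exact conversion to double, and a table indexed by bit length yields the digit count, with a threshold compare when two counts are possible for that bit length. The sketch below illustrates the same idea under different machinery; the pow10 table and the 1233/4096 approximation of log10(2) are stand-ins, not the library's nr_digits table:

    #include <stdint.h>
    #include <string.h>

    /* bit length of a nonzero 64-bit value, read from the exponent field of an
       exact double conversion (the (double) casts at lines 730-740) */
    static int bits_in_u64 (uint64_t x) {      /* x must be nonzero */
      double d;
      uint64_t bits;
      int e;
      if (x >= 0x0000000100000000ull) {        /* x >= 2^32: shift so the cast stays exact */
        d = (double) (x >> 32);
        memcpy (&bits, &d, sizeof bits);
        e = (int) ((bits >> 52) & 0x7ff) - 1023;
        return e + 33;
      }
      d = (double) x;                          /* x < 2^32 < 2^53: exact conversion */
      memcpy (&bits, &d, sizeof bits);
      e = (int) ((bits >> 52) & 0x7ff) - 1023;
      return e + 1;
    }

    /* decimal digit count from the bit length plus one threshold compare */
    static int digits_in_u64 (uint64_t x) {    /* x must be nonzero */
      static const uint64_t pow10[20] = {
        1ull, 10ull, 100ull, 1000ull, 10000ull, 100000ull, 1000000ull,
        10000000ull, 100000000ull, 1000000000ull, 10000000000ull,
        100000000000ull, 1000000000000ull, 10000000000000ull,
        100000000000000ull, 1000000000000000ull, 10000000000000000ull,
        100000000000000000ull, 1000000000000000000ull, 10000000000000000000ull
      };
      int t = (bits_in_u64 (x) * 1233) >> 12;  /* ~ nbits * log10(2) */
      return t + 1 - (x < pow10[t]);           /* a bit length can span two decimal lengths */
    }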
768 // 64 x 64 C1_lo * ten2k64[scale]
769 __mul_64x64_to_128MACH (res, C1_lo, ten2k64[scale]);
771 // 64 x 128 C1_lo * ten2k128[scale - 20]
772 __mul_128x64_to_128 (res, C1_lo, ten2k128[scale - 20]);
777 C1.w[0] = C1_lo;
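
Lines 768-777 scale the coefficient by 10^scale: a 64x64 -> 128 multiply against ten2k64[scale] when 10^scale fits in one word (scale <= 19), a 128x64 multiply against ten2k128[scale - 20] otherwise. A portable stand-in for the 64x64 primitive, assuming __mul_64x64_to_128MACH produces the full product in w[1]:w[0]:

    #include <stdint.h>

    typedef uint64_t UINT64;
    typedef struct { UINT64 w[2]; } UINT128;   /* w[0] low, w[1] high (assumed layout) */

    /* full 64x64 -> 128 product built from 32-bit halves */
    static UINT128 mul_64x64_to_128 (UINT64 a, UINT64 b) {
      UINT64 a_lo = a & 0xffffffffull, a_hi = a >> 32;
      UINT64 b_lo = b & 0xffffffffull, b_hi = b >> 32;
      UINT64 p0 = a_lo * b_lo;
      UINT64 p1 = a_lo * b_hi;
      UINT64 p2 = a_hi * b_lo;
      UINT64 p3 = a_hi * b_hi;
      UINT64 mid = (p0 >> 32) + (p1 & 0xffffffffull) + (p2 & 0xffffffffull);
      UINT128 r;
      r.w[0] = (mid << 32) | (p0 & 0xffffffffull);          /* low 64 bits of the product  */
      r.w[1] = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);  /* high 64 bits, carries folded */
      return r;
    }

    /* usage in the spirit of line 769: res = C1_lo * 10^scale, scale <= 19,
       with ten2k64[] assumed to hold { 1, 10, 100, ..., 10^19 } */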
793 tmp_signif_lo = C1_lo;
797 C1_lo = C2_lo;
805 if (C1_hi == 0) { // x_bits is the nr. of bits in C1_lo
806 if (C1_lo >= 0x0020000000000000ull) { // x >= 2^53
808 if (C1_lo >= 0x0000000100000000ull) { // x >= 2^32
809 tmp1.d = (double) (C1_lo >> 32); // exact conversion
813 tmp1.d = (double) (C1_lo); // exact conversion
818 tmp1.d = (double) C1_lo; // exact conversion
833 C1_lo >= nr_digits[x_nr_bits].threshold_lo))
888 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
892 C1_lo = C1_lo * ten2k64[scale - 19];
893 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
898 C1.w[0] = C1_lo;
904 C1_lo = C1.w[0];
914 && C1_lo == 0x38c15b0a00000000ull && x_sign != y_sign
939 C1_lo = 0x378d8e63ffffffffull;
946 C1_lo = C1_lo + 1;
947 if (C1_lo == 0) { // rounding overflow in the low 64 bits
951 && C1_lo == 0x378d8e6400000000ull) {
954 C1_lo = 0x38c15b0a00000000ull; // 10^33
958 C1_lo = 0x0ull;
970 C1_lo = C1_lo - 1;
971 if (C1_lo == 0xffffffffffffffffull)
979 && C1_lo == 0x38c15b09ffffffffull) {
982 C1_lo = 0x378d8e63ffffffffull;
993 res.w[0] = C1_lo;
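
The pattern at lines 946-993 (and its many later copies) is a 128-bit increment or decrement of the coefficient followed by a renormalization when a power-of-ten boundary is crossed: rounding up from 10^34 - 1 yields 10^34, which is re-expressed as 10^33 with the exponent raised by one; rounding down from 10^33 yields 10^33 - 1, which is re-expressed as 10^34 - 1 with the exponent lowered by one. A sketch of that carry/borrow handling, using the constants visible in the listing and leaving the exponent update to the caller:

    #include <stdint.h>

    typedef uint64_t UINT64;

    /* constants from the listing:
       10^33     = 0x0000314dc6448d93 : 38c15b0a00000000
       10^34 - 1 = 0x0001ed09bead87c0 : 378d8e63ffffffff   */

    /* add one ulp; if the coefficient reaches 10^34, replace it by 10^33 and
       report that the exponent must go up by one */
    static int round_up_one_ulp (UINT64 *C1_hi, UINT64 *C1_lo) {
      *C1_lo = *C1_lo + 1;
      if (*C1_lo == 0)                         /* rounding overflow in the low 64 bits */
        *C1_hi = *C1_hi + 1;
      if (*C1_hi == 0x0001ed09bead87c0ull && *C1_lo == 0x378d8e6400000000ull) {
        *C1_hi = 0x0000314dc6448d93ull;        /* 10^33 */
        *C1_lo = 0x38c15b0a00000000ull;
        return 1;                              /* caller bumps the exponent */
      }
      return 0;
    }

    /* subtract one ulp; if the coefficient drops to 10^33 - 1, replace it by
       10^34 - 1 and report that the exponent must go down by one */
    static int round_down_one_ulp (UINT64 *C1_hi, UINT64 *C1_lo) {
      *C1_lo = *C1_lo - 1;
      if (*C1_lo == 0xffffffffffffffffull)     /* borrow out of the low 64 bits */
        *C1_hi = *C1_hi - 1;
      if (*C1_hi == 0x0000314dc6448d93ull && *C1_lo == 0x38c15b09ffffffffull) {
        *C1_hi = 0x0001ed09bead87c0ull;        /* 10^34 - 1 */
        *C1_lo = 0x378d8e63ffffffffull;
        return -1;                             /* caller lowers the exponent if possible */
      }
      return 0;
    }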
1001 || C1_lo != ten2k64[q1 - 1]))
1003 || C1_lo != ten2k128[q1 - 21].w[0]))) {
1020 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1024 C1_lo = C1_lo * ten2k64[scale - 19];
1025 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1030 C1.w[0] = C1_lo;
1036 C1_lo = C1.w[0];
1042 C1_lo = C1_lo + 1;
1043 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1047 && C1_lo == 0x378d8e6400000000ull) {
1050 C1_lo = 0x38c15b0a00000000ull; // 10^33
1054 C1_lo = 0x0ull;
1067 C1_lo = C1_lo - 1;
1068 if (C1_lo == 0xffffffffffffffffull)
1076 && C1_lo == 0x38c15b09ffffffffull) {
1079 C1_lo = 0x378d8e63ffffffffull;
1090 res.w[0] = C1_lo;
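
Each branch ends by storing the low coefficient word into res.w[0] (lines 993, 1090, 1177, ...). The matching high-word store does not appear in this C1_lo cross-reference, but for a finite decimal128 result it presumably combines the sign, the biased exponent field, and the top coefficient bits. A sketch of that packing, assuming the standard BID128 layout (sign at bit 63, exponent at bits 49-62 of the high word) and assuming x_exp is kept already aligned at bit 49:

    #include <stdint.h>

    typedef uint64_t UINT64;
    typedef struct { UINT64 w[2]; } UINT128;

    /* illustrative packing of a finite decimal128 result; a canonical
       coefficient (< 10^34 < 2^113) always fits this form */
    static UINT128 pack_bid128 (UINT64 x_sign,   /* 0 or 0x8000000000000000ull        */
                                UINT64 x_exp,    /* biased exponent << 49 (assumed)   */
                                UINT64 C1_hi, UINT64 C1_lo) {
      UINT128 res;
      res.w[1] = x_sign | x_exp | C1_hi;
      res.w[0] = C1_lo;
      return res;
    }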
1092 && (q1 < P34 || ((C1_lo & 0x1) == 0))) {
1104 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1108 C1_lo = C1_lo * ten2k64[scale - 19];
1109 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1114 C1.w[0] = C1_lo;
1120 C1_lo = C1.w[0];
1123 && (C1_lo & 0x01)) || (rnd_mode == ROUNDING_TIES_AWAY
1128 C1_lo = C1_lo + 1;
1129 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1133 && C1_lo == 0x378d8e6400000000ull) {
1136 C1_lo = 0x38c15b0a00000000ull; // 10^33
1140 C1_lo = 0x0ull;
1148 && (C1_lo & 0x01)) || (rnd_mode == ROUNDING_DOWN
1155 C1_lo = C1_lo - 1;
1156 if (C1_lo == 0xffffffffffffffffull)
1164 && C1_lo == 0x38c15b09ffffffffull) {
1167 C1_lo = 0x378d8e63ffffffffull;
1177 res.w[0] = C1_lo;
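
The conditions at lines 1092, 1123, 1148 and their later copies decide whether the kept coefficient is incremented, based on the rounding mode, the sign of the result, the parity of C1_lo, and how the discarded part compares with half an ulp. A compressed sketch of that decision; the ROUNDING_* names follow the identifiers visible in the listing, while the cmp_half input and the enum values are illustrative assumptions:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t UINT64;

    typedef enum {
      ROUNDING_TO_NEAREST, ROUNDING_DOWN, ROUNDING_UP,
      ROUNDING_TO_ZERO, ROUNDING_TIES_AWAY
    } rounding_mode_t;                     /* values illustrative */

    /* should the truncated coefficient gain one ulp?
       cmp_half: -1 / 0 / +1 for a nonzero discarded part <, ==, > half an ulp */
    static bool round_up (rounding_mode_t rnd_mode, bool negative,
                          UINT64 C1_lo, int cmp_half) {
      switch (rnd_mode) {
      case ROUNDING_TO_NEAREST:            /* ties to even: the low bit of C1_lo decides */
        return cmp_half > 0 || (cmp_half == 0 && (C1_lo & 0x01));
      case ROUNDING_TIES_AWAY:
        return cmp_half >= 0;
      case ROUNDING_DOWN:                  /* toward -inf: only negative results move */
        return negative;
      case ROUNDING_UP:                    /* toward +inf: only positive results move */
        return !negative;
      case ROUNDING_TO_ZERO:
      default:
        return false;
      }
    }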
1179 // (C2_lo == halfulp64 && q1 == P34 && ((C1_lo & 0x1) == 1)), i.e.
1191 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1195 C1_lo = C1_lo * ten2k64[scale - 19];
1196 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1201 C1.w[0] = C1_lo;
1207 C1_lo = C1.w[0];
1210 && C1_lo == 0x378d8e6400000000ull) {
1213 C1_lo = 0x38c15b0a00000000ull; // 10^33
1226 C1_lo = C1_lo - 1;
1227 if (C1_lo == 0xffffffffffffffffull)
1230 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
1232 C1_lo = 0x378d8e63ffffffffull;
1245 C1_lo = C1_lo + 1;
1246 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1250 && C1_lo == 0x378d8e6400000000ull) {
1253 C1_lo = 0x38c15b0a00000000ull; // 10^33
1257 C1_lo = 0x0ull;
1270 res.w[0] = C1_lo;
1289 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1293 C1_lo = C1_lo * ten2k64[scale - 19];
1294 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1299 C1.w[0] = C1_lo;
1304 C1_lo = C1.w[0];
1311 C1_lo = C1_lo + 1;
1312 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1316 && C1_lo == 0x378d8e6400000000ull) {
1319 C1_lo = 0x38c15b0a00000000ull; // 10^33
1323 C1_lo = 0x0ull;
1336 C1_lo = C1_lo - 1;
1337 if (C1_lo == 0xffffffffffffffffull)
1345 && C1_lo == 0x38c15b09ffffffffull) {
1348 C1_lo = 0x378d8e63ffffffffull;
1359 res.w[0] = C1_lo;
1362 && (q1 < P34 || ((C1_lo & 0x1) == 0))) {
1375 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1379 C1_lo = C1_lo * ten2k64[scale - 19];
1380 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1385 C1.w[0] = C1_lo;
1391 C1_lo = C1.w[0];
1397 C1_lo = C1_lo + 1;
1398 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1402 && C1_lo == 0x378d8e6400000000ull) {
1405 C1_lo = 0x38c15b0a00000000ull; // 10^33
1409 C1_lo = 0x0ull;
1420 C1_lo = C1_lo - 1;
1421 if (C1_lo == 0xffffffffffffffffull)
1429 && C1_lo == 0x38c15b09ffffffffull) {
1432 C1_lo = 0x378d8e63ffffffffull;
1443 res.w[0] = C1_lo;
1457 __mul_64x64_to_128MACH (C1, ten2k64[scale], C1_lo);
1461 C1_lo = C1_lo * ten2k64[scale - 19];
1462 __mul_64x64_to_128MACH (C1, ten2k64[19], C1_lo);
1467 C1.w[0] = C1_lo;
1472 C1_lo = C1.w[0];
1485 C1_lo = C1_lo - 1;
1486 if (C1_lo == 0xffffffffffffffffull)
1489 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
1491 C1_lo = 0x378d8e63ffffffffull;
1504 C1_lo = C1_lo + 1;
1505 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1509 && C1_lo == 0x378d8e6400000000ull) {
1512 C1_lo = 0x38c15b0a00000000ull; // 10^33
1516 C1_lo = 0x0ull;
1529 res.w[0] = C1_lo;
1553 __mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
1557 __mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
1560 C1.w[0] = C1_lo;
1768 C1_lo = C1.w[0];
1791 C1_lo = C1_lo + 1;
1792 if (C1_lo == 0) { // rounding overflow in the low 64 bits
1796 && C1_lo == 0x378d8e6400000000ull) {
1799 C1_lo = 0x38c15b0a00000000ull; // 10^33
1811 C1_lo = C1_lo - 1;
1812 if (C1_lo == 0xffffffffffffffffull)
1815 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
1817 C1_lo = 0x378d8e63ffffffffull;
1827 res.w[0] = C1_lo;
1841 __mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
1843 C1_lo = C1.w[0];
1847 __mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
1850 C1.w[0] = C1_lo;
1854 C1_lo = C1.w[0];
1856 C1.w[0] = C1_lo; // C1.w[1] = C1_hi;
1861 C1_lo = C1_lo + C2_lo;
1863 if (C1_lo < C1.w[0])
1866 C1_lo = C1_lo - C2_lo;
1868 if (C1_lo > C1.w[0])
1871 if (C1_lo == 0 && C1_hi == 0) {
1885 C1_lo = ~C1_lo;
1886 C1_lo++;
1888 if (C1_lo == 0x0)
1895 res.w[0] = C1_lo;
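
Lines 1861-1895 add or subtract the low coefficient words directly and detect carry or borrow by comparing the result against the untouched copy left in C1.w[0]; when the signed 128-bit difference comes out negative, it is negated in two's complement (complement, then add one with carry). A sketch of those three primitives:

    #include <stdint.h>

    typedef uint64_t UINT64;

    /* C1 += C2 over 128 bits; carry out of the low word iff the sum wraps below
       the saved original low word (the C1.w[0] compare at line 1863) */
    static void add_128 (UINT64 *C1_hi, UINT64 *C1_lo, UINT64 C2_hi, UINT64 C2_lo) {
      UINT64 saved_lo = *C1_lo;
      *C1_lo = *C1_lo + C2_lo;
      if (*C1_lo < saved_lo)
        *C1_hi = *C1_hi + 1;               /* carry */
      *C1_hi = *C1_hi + C2_hi;
    }

    /* C1 -= C2 over 128 bits; borrow iff the difference exceeds the saved low word
       (the compare at line 1868) */
    static void sub_128 (UINT64 *C1_hi, UINT64 *C1_lo, UINT64 C2_hi, UINT64 C2_lo) {
      UINT64 saved_lo = *C1_lo;
      *C1_lo = *C1_lo - C2_lo;
      if (*C1_lo > saved_lo)
        *C1_hi = *C1_hi - 1;               /* borrow */
      *C1_hi = *C1_hi - C2_hi;
    }

    /* two's complement negation of a 128-bit value, as at lines 1885-1888 */
    static void neg_128 (UINT64 *C1_hi, UINT64 *C1_lo) {
      *C1_hi = ~*C1_hi;
      *C1_lo = ~*C1_lo;
      (*C1_lo)++;
      if (*C1_lo == 0x0)                   /* the +1 carries into the high word */
        (*C1_hi)++;
    }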
1905 __mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
1909 __mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
1912 C1.w[0] = C1_lo;
1917 C1.w[0] = C1_lo; // only the low part is necessary
1920 C1_lo = C1.w[0];
1924 C1_lo = C1_lo + C2_lo;
1926 if (C1_lo < C1.w[0])
1929 if (C1_hi > 0x0001ed09bead87c0ull || (C1_hi == 0x0001ed09bead87c0ull && C1_lo >= 0x378d8e6400000000ull)) { // C1 >= 10^34
1934 if (C1_lo >= 0xfffffffffffffffbull) { // low half add has carry
1935 C1_lo = C1_lo + 5;
1938 C1_lo = C1_lo + 5;
1944 C1.w[0] = C1_lo; // C''
2011 C1_lo = P256.w[2];
2043 C1_lo = C1_lo + 1;
2044 if (C1_lo == 0) { // rounding overflow in the low 64 bits
2048 && C1_lo == 0x378d8e6400000000ull) {
2051 C1_lo = 0x38c15b0a00000000ull; // 10^33
2064 C1_lo = C1_lo - 1;
2065 if (C1_lo == 0xffffffffffffffffull)
2068 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
2070 C1_lo = 0x378d8e63ffffffffull;
2082 C1_lo = 0x0ull;
2085 C1_lo = 0x378d8e63ffffffffull;
2096 C1_lo = C1_lo - C2_lo;
2098 if (C1_lo > C1.w[0])
2101 if (C1_lo == 0 && C1_hi == 0) {
2115 C1_lo = ~C1_lo;
2116 C1_lo++;
2118 if (C1_lo == 0x0)
2125 res.w[0] = C1_lo;
2146 __mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
2150 __mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
2153 C1.w[0] = C1_lo;
2158 C1.w[0] = C1_lo;
2573 C1_lo = C1.w[0];
2596 C1_lo = C1_lo + 1;
2597 if (C1_lo == 0) { // rounding overflow in the low 64 bits
2601 && C1_lo == 0x378d8e6400000000ull) {
2604 C1_lo = 0x38c15b0a00000000ull; // 10^33
2616 C1_lo = C1_lo - 1;
2617 if (C1_lo == 0xffffffffffffffffull)
2620 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
2622 C1_lo = 0x378d8e63ffffffffull;
2634 C1_lo = 0x0ull;
2637 C1_lo = 0x378d8e63ffffffffull;
2648 res.w[0] = C1_lo;
2670 __mul_128x64_to_128 (C1, C1_lo, ten2k128[scale - 20]);
2674 __mul_64x64_to_128MACH (C1, C1_lo, ten2k64[scale]);
2677 C1.w[0] = C1_lo;
2682 C1.w[0] = C1_lo; // only the low part is necessary
2685 C1_lo = C1.w[0];
2689 C1_lo = C1_lo + C2_lo;
2691 if (C1_lo < C1.w[0])
2694 if (C1_hi > 0x0001ed09bead87c0ull || (C1_hi == 0x0001ed09bead87c0ull && C1_lo >= 0x378d8e6400000000ull)) { // C1 >= 10^34
2699 if (C1_lo >= 0xfffffffffffffffbull) { // low half add has carry
2700 C1_lo = C1_lo + 5;
2703 C1_lo = C1_lo + 5;
2709 C1.w[0] = C1_lo; // C''
2776 C1_lo = P256.w[2];
2806 C1_lo = C1_lo + 1;
2807 if (C1_lo == 0) { // rounding overflow in the low 64 bits
2811 && C1_lo == 0x378d8e6400000000ull) {
2814 C1_lo = 0x38c15b0a00000000ull; // 10^33
2824 C1_lo = C1_lo - 1;
2825 if (C1_lo == 0xffffffffffffffffull)
2828 if (C1_hi == 0x0000314dc6448d93ull && C1_lo == 0x38c15b09ffffffffull) { // 10^33 - 1
2830 C1_lo = 0x378d8e63ffffffffull;
2842 C1_lo = 0x0ull;
2845 C1_lo = 0x378d8e63ffffffffull;
2857 res.w[0] = C1_lo;
2859 C1_lo = C2_lo - C1_lo;
2861 if (C1_lo > C2_lo)
2864 C1_lo = ~C1_lo;
2865 C1_lo++;
2867 if (C1_lo == 0x0)
2872 if (C1_lo == 0 && C1_hi == 0) {
2887 res.w[0] = C1_lo;