Lines Matching refs:CY

38 __div_128_by_128 (UINT128 * pCQ, UINT128 * pCR, UINT128 CX, UINT128 CY) {
46 if (!CY.w[1]) {
50 pCQ->w[0] = CX.w[0] / CY.w[0];
53 pCR->w[0] = CX.w[0] - pCQ->w[0] * CY.w[0];
65 dq = dx / (BINARY80) CY.w[0];
71 Rh = CX.w[0] - Ql * CY.w[0];
72 Ql2 = Rh / CY.w[0];
73 pCR->w[0] = Rh - Ql2 * CY.w[0];
80 // now CY.w[1] > 0
85 ly = (BINARY80) CY.w[1] * (BINARY80) t64.d + (BINARY80) CY.w[0];
92 /*if(__unsigned_compare_ge_128(CX,CY))
95 __sub_128_128((*pCR), CX, CY);
105 if (CY.w[1] >= 16 || pCQ->w[0] <= 0x1000000000000000ull) {
107 __mul_64x128_full (Ph, CQB, (pCQ->w[0]), CY);
109 if (__unsigned_compare_ge_128 (CA, CY)) {
110 __sub_128_128 (CA, CA, CY);
112 if (__unsigned_compare_ge_128 (CA, CY)) {
113 __sub_128_128 (CA, CA, CY);
122 __mul_64x128_full (Ph, CQB, (pCQ->w[0]), CY);
125 CB8.w[1] = (CY.w[1] << 3) | (CY.w[0] >> 61);
126 CB8.w[0] = CY.w[0] << 3;
127 CB4.w[1] = (CY.w[1] << 2) | (CY.w[0] >> 62);
128 CB4.w[0] = CY.w[0] << 2;
129 CB2.w[1] = (CY.w[1] << 1) | (CY.w[0] >> 63);
130 CB2.w[0] = CY.w[0] << 1;
144 if (__unsigned_compare_ge_128 (CA, CY)) {
146 __sub_128_128 (CA, CA, CY);
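
The matches above (source lines 38-146) come from the first __div_128_by_128 variant. When CY fits in one 64-bit word (line 46), the division is done either directly on the low words (lines 50-53) or via a BINARY80 quotient estimate that is then repaired with exact integer arithmetic (lines 65-73); once CY.w[1] > 0 (line 80), a trial quotient is multiplied back with __mul_64x128_full (lines 107, 122) and the residue is folded into range by the compare-and-subtract steps at lines 109-113 and 144-146. The sketch below illustrates only the estimate-then-correct idea and is not the library routine: it assumes y != 0, x_hi < y (so the quotient fits in 64 bits), and a compiler with unsigned __int128. The 16384 margin is sized so the floating-point estimate never overshoots even if long double is only double precision; the library instead relies on its own operand ranges and keeps the residue in a single 64-bit word (line 71).

#include <stdint.h>

/* Illustrative only: estimate the quotient in floating point, then repair
   the last few units exactly.  Assumes y != 0 and x_hi < y. */
static void
div128by64_sketch (uint64_t x_hi, uint64_t x_lo, uint64_t y,
                   uint64_t * q, uint64_t * r) {
  const long double two64 = 18446744073709551616.0L;   /* 2^64 */
  long double dx = (long double) x_hi * two64 + (long double) x_lo;
  long double dq = dx / (long double) y;   /* estimate; its error is well under
                                              the margin subtracted below */
  /* deliberate underestimate: subtract more than the worst-case rounding
     error so ql never exceeds the true quotient */
  uint64_t ql = (dq > 32768.0L) ? (uint64_t) (dq - 16384.0L) : 0;
  unsigned __int128 x = ((unsigned __int128) x_hi << 64) | x_lo;
  unsigned __int128 rh = x - (unsigned __int128) ql * y;   /* exact residue */
  uint64_t ql2 = (uint64_t) (rh / y);                      /* small fix-up */
  *q = ql + ql2;
  *r = (uint64_t) (rh - (unsigned __int128) ql2 * y);
}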
160 __div_256_by_128 (UINT128 * pCQ, UINT256 * pCA4, UINT128 CY) {
177 ly = (BINARY80) CY.w[1] * l128 + (BINARY80) CY.w[0] * l64;
184 // CQ2*CY
185 __mul_128x128_to_256 (CQ2Y, CY, CQ2);
196 // (*pCA4) -CQ2Y, guaranteed below 5*2^49*CY < 5*2^(49+128)
210 __mul_64x128_short (CQ3Y, Q3, CY);
214 if ((*pCA4).w[1] > CY.w[1]
215 || ((*pCA4).w[1] == CY.w[1] && (*pCA4).w[0] >= CY.w[0])) {
217 __sub_borrow_out ((*pCA4).w[0], carry64, (*pCA4).w[0], CY.w[0]);
218 (*pCA4).w[1] = (*pCA4).w[1] - CY.w[1] - carry64;
219 if ((*pCA4).w[1] > CY.w[1]
220 || ((*pCA4).w[1] == CY.w[1] && (*pCA4).w[0] >= CY.w[0])) {
223 CY.w[0]);
224 (*pCA4).w[1] = (*pCA4).w[1] - CY.w[1] - carry64;
244 __mul_64x128_short (CQ3Y, Q3, CY);
248 if ((SINT64) (*pCA4).w[1] > (SINT64) CY.w[1]
249 || ((*pCA4).w[1] == CY.w[1] && (*pCA4).w[0] >= CY.w[0])) {
251 __sub_borrow_out ((*pCA4).w[0], carry64, (*pCA4).w[0], CY.w[0]);
252 (*pCA4).w[1] = (*pCA4).w[1] - CY.w[1] - carry64;
255 __add_carry_out ((*pCA4).w[0], carry64, (*pCA4).w[0], CY.w[0]);
256 (*pCA4).w[1] = (*pCA4).w[1] + CY.w[1] + carry64;
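
In the __div_256_by_128 matches above (source lines 160-256), the partial quotients CQ2 and Q3 are multiplied back against CY (__mul_128x128_to_256 at line 185, __mul_64x128_short at lines 210 and 244) and the running remainder in *pCA4 is then adjusted word by word: CY is subtracted with an explicit borrow while the remainder is still at least CY (lines 214-224), and what appears to be an overshoot branch adds CY back with an explicit carry (lines 248-256). The sketch below shows that word-wise adjustment with illustrative stand-ins for the __add_carry_out / __sub_borrow_out helpers, whose real definitions are not part of this excerpt; treating a negative signed high word as the overshoot signal is an assumption based on the SINT64 compare at line 248.

#include <stdint.h>

/* Illustrative stand-ins for the library's carry/borrow macros. */
static uint64_t
add_carry_out (uint64_t * sum, uint64_t a, uint64_t b) {
  *sum = a + b;
  return (uint64_t) (*sum < a);        /* carry out of the low word */
}
static uint64_t
sub_borrow_out (uint64_t * diff, uint64_t a, uint64_t b) {
  *diff = a - b;
  return (uint64_t) (a < b);           /* borrow out of the low word */
}

/* One adjustment round on a two-word remainder (hi:lo): if it is still at
   least cy, subtract cy; if the high word has gone negative (previous
   estimate overshot), add cy back. */
static void
adjust_remainder (uint64_t * hi, uint64_t * lo,
                  uint64_t cy_hi, uint64_t cy_lo) {
  if (*hi > cy_hi || (*hi == cy_hi && *lo >= cy_lo)) {
    uint64_t borrow = sub_borrow_out (lo, *lo, cy_lo);
    *hi = *hi - cy_hi - borrow;
  } else if ((int64_t) * hi < 0) {
    uint64_t carry = add_carry_out (lo, *lo, cy_lo);
    *hi = *hi + cy_hi + carry;
  }
}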
272 __div_128_by_128 (UINT128 * pCQ, UINT128 * pCR, UINT128 CX0, UINT128 CY) {
278 if (!CX0.w[1] && !CY.w[1]) {
279 pCQ->w[0] = CX0.w[0] / CY.w[0];
282 pCR->w[0] = CX0.w[0] - pCQ->w[0] * CY.w[0];
292 ly = (double) CY.w[1] * t64.d + (double) CY.w[0];
295 CY36.w[1] = CY.w[0] >> (64 - 36);
296 CY36.w[0] = CY.w[0] << 36;
301 if (!CY.w[1] && !CY36.w[1] && (CX.w[1] >= CY36.w[0])) {
304 // 2^(-60)*CX/CY
309 // Q*CY
310 __mul_64x64_to_128 (A2, Q, CY.w[0]);
326 CY51.w[1] = (CY.w[1] << 51) | (CY.w[0] >> (64 - 51));
327 CY51.w[0] = CY.w[0] << 51;
329 if (CY.w[1] < (UINT64) (1 << (64 - 51))
333 // 2^(-49)*CX/CY
339 // Q*CY
340 __mul_64x64_to_128 (A2, Q, CY.w[0]);
341 A2.w[1] += Q * CY.w[1];
359 __mul_64x64_to_128 (A2, Q, CY.w[0]);
360 A2.w[1] += Q * CY.w[1];
365 CX.w[0] += CY.w[0];
366 if (CX.w[0] < CY.w[0])
368 CX.w[1] += CY.w[1];
371 CX.w[0] += CY.w[0];
372 if (CX.w[0] < CY.w[0])
374 CX.w[1] += CY.w[1];
376 } else if (__unsigned_compare_ge_128 (CX, CY)) {
378 __sub_128_128 (CX, CX, CY);
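
The second __div_128_by_128 variant (source lines 272-378) appears to work with double rather than BINARY80 estimates (line 292) and with pre-shifted copies of the divisor, CY36 = CY << 36 and CY51 = CY << 51 (lines 295-296 and 326-327), so that quotient bits can be produced in chunks (the 2^(-60) and 2^(-49) scaling comments at lines 304 and 333). The helper below is a minimal sketch of that cross-word shift; u128_t is an illustrative stand-in for the library's UINT128 type.

#include <stdint.h>

typedef struct { uint64_t w[2]; } u128_t;   /* w[0] = low word, w[1] = high word */

/* Shift a two-word value left by s bits (0 < s < 64), in the style of the
   CY36/CY51 construction: bits leaving the low word are spliced into the
   bottom of the high word, and bits shifted past the high word are lost. */
static u128_t
shl128 (u128_t a, unsigned s) {
  u128_t r;
  r.w[1] = (a.w[1] << s) | (a.w[0] >> (64 - s));
  r.w[0] = a.w[0] << s;
  return r;
}

Lines 365-378 then finish with the usual correction: CY is added back (the low-word comparisons at lines 366 and 372 apparently propagate a carry into the high word on lines not matched here), or subtracted once more when the remainder is still at least CY.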
393 __div_256_by_128 (UINT128 * pCQ, UINT256 * pCA4, UINT128 CY) {
418 ly = (double) CY.w[1] * t64.d + (double) CY.w[0];
421 CY36.w[2] = CY.w[1] >> (64 - 36);
422 CY36.w[1] = (CY.w[1] << 36) | (CY.w[0] >> (64 - 36));
423 CY36.w[0] = CY.w[0] << 36;
433 // 2^(-60)*CA4/CY
438 // Q*CY
439 __mul_64x128_to_192 (CA2, Q, CY);
462 CY51.w[2] = CY.w[1] >> (64 - 51);
463 CY51.w[1] = (CY.w[1] << 51) | (CY.w[0] >> (64 - 51));
464 CY51.w[0] = CY.w[0] << 51;
472 // 2^(-49)*CA4/CY
478 // Q*CY
479 __mul_64x64_to_128 (A2, Q, CY.w[0]);
480 __mul_64x64_to_128 (A2h, Q, CY.w[1]);
505 __mul_64x64_to_128 (A2, Q, CY.w[0]);
506 A2.w[1] += Q * CY.w[1];
511 CA4.w[0] += CY.w[0];
512 if (CA4.w[0] < CY.w[0])
514 CA4.w[1] += CY.w[1];
517 CA4.w[0] += CY.w[0];
518 if (CA4.w[0] < CY.w[0])
520 CA4.w[1] += CY.w[1];
522 } else if (__unsigned_compare_ge_128 (CA4, CY)) {
524 __sub_128_128 (CA4, CA4, CY);
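
The last group (source lines 393-524) is the wider __div_256_by_128 path: a three-word CY36 is built at lines 421-423, each 64-bit partial quotient is multiplied back against the full 128-bit CY, either through __mul_64x128_to_192 (line 439) or as two 64x64 products combined by hand (lines 479-480, 505-506), and the remainder is corrected as in the other routines (lines 511-524). The sketch below shows one way such a 64x128 to 192-bit product can be assembled from two partial products; it is not the library macro and it assumes a compiler with unsigned __int128.

#include <stdint.h>

/* Illustrative 64x128 -> 192-bit multiply: p[0] is the low word, p[2] the
   high word of a * (b_hi:b_lo). */
static void
mul_64x128_to_192 (uint64_t p[3], uint64_t a, uint64_t b_lo, uint64_t b_hi) {
  unsigned __int128 lo = (unsigned __int128) a * b_lo;
  unsigned __int128 hi = (unsigned __int128) a * b_hi;
  hi += (uint64_t) (lo >> 64);          /* fold in the carry word; cannot overflow */
  p[0] = (uint64_t) lo;
  p[1] = (uint64_t) hi;
  p[2] = (uint64_t) (hi >> 64);
}

Lines 505-506 build only the truncated 128-bit form of the same product, adding Q * CY.w[1] directly into the high word of the 64x64 result.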