/* bn_asm.c — FreeBSD revision 277195 */
1/* crypto/bn/bn_asm.c */ 2/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) 3 * All rights reserved. 4 * 5 * This package is an SSL implementation written 6 * by Eric Young (eay@cryptsoft.com). 7 * The implementation was written so as to conform with Netscapes SSL. 8 * 9 * This library is free for commercial and non-commercial use as long as 10 * the following conditions are aheared to. The following conditions 11 * apply to all code found in this distribution, be it the RC4, RSA, 12 * lhash, DES, etc., code; not just the SSL code. The SSL documentation 13 * included with this distribution is covered by the same copyright terms 14 * except that the holder is Tim Hudson (tjh@cryptsoft.com). 15 * 16 * Copyright remains Eric Young's, and as such any Copyright notices in 17 * the code are not to be removed. 18 * If this package is used in a product, Eric Young should be given attribution 19 * as the author of the parts of the library used. 20 * This can be in the form of a textual message at program startup or 21 * in documentation (online or textual) provided with the package. 22 * 23 * Redistribution and use in source and binary forms, with or without 24 * modification, are permitted provided that the following conditions 25 * are met: 26 * 1. Redistributions of source code must retain the copyright 27 * notice, this list of conditions and the following disclaimer. 28 * 2. Redistributions in binary form must reproduce the above copyright 29 * notice, this list of conditions and the following disclaimer in the 30 * documentation and/or other materials provided with the distribution. 31 * 3. All advertising materials mentioning features or use of this software 32 * must display the following acknowledgement: 33 * "This product includes cryptographic software written by 34 * Eric Young (eay@cryptsoft.com)" 35 * The word 'cryptographic' can be left out if the rouines from the library 36 * being used are not cryptographic related :-). 37 * 4. 
If you include any Windows specific code (or a derivative thereof) from 38 * the apps directory (application code) you must include an acknowledgement: 39 * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" 40 * 41 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND 42 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 43 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 44 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 45 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 46 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 47 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 51 * SUCH DAMAGE. 52 * 53 * The licence and distribution terms for any publically available version or 54 * derivative of this code cannot be changed. i.e. this code cannot simply be 55 * copied and put under another distribution licence 56 * [including the GNU Public Licence.] 
 */

/*
 * Portable C implementations of the low-level BIGNUM word primitives that
 * are normally supplied by per-platform assembler.  All routines operate on
 * little-endian arrays of BN_ULONG words; BN_MASK2 masks a full word and
 * BN_BITS2/BN_BITS4 are the full/half word bit counts (see bn_lcl.h).
 */

#ifndef BN_DEBUG
# undef NDEBUG /* avoid conflicting definitions */
# define NDEBUG
#endif

#include <stdio.h>
#include <assert.h>
#include "cryptlib.h"
#include "bn_lcl.h"

#if defined(BN_LLONG) || defined(BN_UMULT_HIGH)

/*
 * rp[i] += ap[i] * w for i in [0, num); returns the final carry word.
 * mul_add() (bn_lcl.h) performs one word multiply-accumulate with carry
 * threaded through c1.  The main loop is unrolled 4x unless building for
 * small footprint.
 */
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG c1=0;

    assert(num >= 0);
    if (num <= 0) return(c1);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (num&~3)
        {
        mul_add(rp[0],ap[0],w,c1);
        mul_add(rp[1],ap[1],w,c1);
        mul_add(rp[2],ap[2],w,c1);
        mul_add(rp[3],ap[3],w,c1);
        ap+=4; rp+=4; num-=4;
        }
#endif
    while (num)
        {
        mul_add(rp[0],ap[0],w,c1);
        ap++; rp++; num--;
        }

    return(c1);
    }

/*
 * rp[i] = ap[i] * w (overwriting, not accumulating); returns the final
 * carry word.  mul() is the non-accumulating counterpart of mul_add().
 */
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG c1=0;

    assert(num >= 0);
    if (num <= 0) return(c1);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (num&~3)
        {
        mul(rp[0],ap[0],w,c1);
        mul(rp[1],ap[1],w,c1);
        mul(rp[2],ap[2],w,c1);
        mul(rp[3],ap[3],w,c1);
        ap+=4; rp+=4; num-=4;
        }
#endif
    while (num)
        {
        mul(rp[0],ap[0],w,c1);
        ap++; rp++; num--;
        }
    return(c1);
    }

/*
 * (r[2i+1], r[2i]) = a[i]^2 for i in [0, n): squares each input word into
 * a double-word result.  r must therefore have room for 2*n words.
 */
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
    {
    assert(n >= 0);
    if (n <= 0) return;

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (n&~3)
        {
        sqr(r[0],r[1],a[0]);
        sqr(r[2],r[3],a[1]);
        sqr(r[4],r[5],a[2]);
        sqr(r[6],r[7],a[3]);
        a+=4; r+=8; n-=4;
        }
#endif
    while (n)
        {
        sqr(r[0],r[1],a[0]);
        a++; r+=2; n--;
        }
    }

#else /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

/*
 * Portable variant of bn_mul_add_words for platforms with neither a
 * double-width integer type nor a high-half multiply: w is split into
 * half-words bl/bh once, and the half-word mul_add() variant is used.
 */
BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG c=0;
    BN_ULONG bl,bh;

    assert(num >= 0);
    if (num <= 0) return((BN_ULONG)0);

    bl=LBITS(w);
    bh=HBITS(w);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (num&~3)
        {
        mul_add(rp[0],ap[0],bl,bh,c);
        mul_add(rp[1],ap[1],bl,bh,c);
        mul_add(rp[2],ap[2],bl,bh,c);
        mul_add(rp[3],ap[3],bl,bh,c);
        ap+=4; rp+=4; num-=4;
        }
#endif
    while (num)
        {
        mul_add(rp[0],ap[0],bl,bh,c);
        ap++; rp++; num--;
        }
    return(c);
    }

/* Portable variant of bn_mul_words (see above): rp[] = ap[] * w, returns carry. */
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
    {
    BN_ULONG carry=0;
    BN_ULONG bl,bh;

    assert(num >= 0);
    if (num <= 0) return((BN_ULONG)0);

    bl=LBITS(w);
    bh=HBITS(w);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (num&~3)
        {
        mul(rp[0],ap[0],bl,bh,carry);
        mul(rp[1],ap[1],bl,bh,carry);
        mul(rp[2],ap[2],bl,bh,carry);
        mul(rp[3],ap[3],bl,bh,carry);
        ap+=4; rp+=4; num-=4;
        }
#endif
    while (num)
        {
        mul(rp[0],ap[0],bl,bh,carry);
        ap++; rp++; num--;
        }
    return(carry);
    }

/* Portable variant of bn_sqr_words using the half-word sqr64() macro. */
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
    {
    assert(n >= 0);
    if (n <= 0) return;

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (n&~3)
        {
        sqr64(r[0],r[1],a[0]);
        sqr64(r[2],r[3],a[1]);
        sqr64(r[4],r[5],a[2]);
        sqr64(r[6],r[7],a[3]);
        a+=4; r+=8; n-=4;
        }
#endif
    while (n)
        {
        sqr64(r[0],r[1],a[0]);
        a++; r+=2; n--;
        }
    }

#endif /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

#if defined(BN_LLONG) && defined(BN_DIV2W)

/*
 * Divide the double word (h:l) by d, returning the one-word quotient.
 * With a native double-width type this is a single hardware division.
 * Caller must ensure the quotient fits in one word (h < d).
 */
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
    {
    return((BN_ULONG)(((((BN_ULLONG)h)<<BN_BITS2)|l)/(BN_ULLONG)d));
    }

#else

/* Divide h,l by d and return the result. */
/* I need to test this some more :-( */
/*
 * Schoolbook two-digit (half-word radix) division: the divisor is
 * normalized so its top bit is set, then two quotient half-words are
 * produced by estimate-and-correct steps (classic Knuth Algorithm D
 * with a 2-digit divisor).  Returns BN_MASK2 on division by zero.
 */
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
    {
    BN_ULONG dh,dl,q,ret=0,th,tl,t;
    int i,count=2;

    if (d == 0) return(BN_MASK2);

    i=BN_num_bits_word(d);
    assert((i == BN_BITS2) || (h <= (BN_ULONG)1<<i));

    /* normalize: shift d (and h:l) left until d's top bit is set */
    i=BN_BITS2-i;
    if (h >= d) h-=d;

    if (i)
        {
        d<<=i;
        h=(h<<i)|(l>>(BN_BITS2-i));
        l<<=i;
        }
    dh=(d&BN_MASK2h)>>BN_BITS4;
    dl=(d&BN_MASK2l);
    for (;;)
        {
        /* estimate one quotient half-word, clamping at the radix-1 */
        if ((h>>BN_BITS4) == dh)
            q=BN_MASK2l;
        else
            q=h/dh;

        th=q*dh;
        tl=dl*q;
        /* correct the (at most slightly too large) estimate downward */
        for (;;)
            {
            t=h-th;
            if ((t&BN_MASK2h) ||
                ((tl) <= (
                    (t<<BN_BITS4)|
                    ((l&BN_MASK2h)>>BN_BITS4))))
                break;
            q--;
            th-=dh;
            tl-=dl;
            }
        t=(tl>>BN_BITS4);
        tl=(tl<<BN_BITS4)&BN_MASK2h;
        th+=t;

        /* subtract q*d from the running remainder (h:l) */
        if (l < tl) th++;
        l-=tl;
        if (h < th)
            {
            h+=d;
            q--;
            }
        h-=th;

        if (--count == 0) break;

        ret=q<<BN_BITS4;
        h=((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2;
        l=(l&BN_MASK2l)<<BN_BITS4;
        }
    ret|=q;
    return(ret);
    }
#endif /* !defined(BN_LLONG) && defined(BN_DIV2W) */

#ifdef BN_LLONG
/*
 * r[] = a[] + b[] over n words; returns the final carry (0 or 1).
 * Uses the double-width type to accumulate the carry implicitly.
 */
BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
    {
    BN_ULLONG ll=0;

    assert(n >= 0);
    if (n <= 0) return((BN_ULONG)0);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (n&~3)
        {
        ll+=(BN_ULLONG)a[0]+b[0];
        r[0]=(BN_ULONG)ll&BN_MASK2;
        ll>>=BN_BITS2;
        ll+=(BN_ULLONG)a[1]+b[1];
        r[1]=(BN_ULONG)ll&BN_MASK2;
        ll>>=BN_BITS2;
        ll+=(BN_ULLONG)a[2]+b[2];
        r[2]=(BN_ULONG)ll&BN_MASK2;
        ll>>=BN_BITS2;
        ll+=(BN_ULLONG)a[3]+b[3];
        r[3]=(BN_ULONG)ll&BN_MASK2;
        ll>>=BN_BITS2;
        a+=4; b+=4; r+=4; n-=4;
        }
#endif
    while (n)
        {
        ll+=(BN_ULLONG)a[0]+b[0];
        r[0]=(BN_ULONG)ll&BN_MASK2;
        ll>>=BN_BITS2;
        a++; b++; r++; n--;
        }
    return((BN_ULONG)ll);
    }
#else /* !BN_LLONG */
/*
 * Portable bn_add_words: carry is detected via the (t+c) and (t+b) wrap
 * comparisons.  Note c can momentarily be 0..2 inside a step; the masked
 * sums keep the stored words exact.
 */
BN_ULONG bn_add_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
    {
    BN_ULONG c,l,t;

    assert(n >= 0);
    if (n <= 0) return((BN_ULONG)0);

    c=0;
#ifndef OPENSSL_SMALL_FOOTPRINT
    while (n&~3)
        {
        t=a[0];
        t=(t+c)&BN_MASK2;
        c=(t < c);
        l=(t+b[0])&BN_MASK2;
        c+=(l < t);
        r[0]=l;
        t=a[1];
        t=(t+c)&BN_MASK2;
        c=(t < c);
        l=(t+b[1])&BN_MASK2;
        c+=(l < t);
        r[1]=l;
        t=a[2];
        t=(t+c)&BN_MASK2;
        c=(t < c);
        l=(t+b[2])&BN_MASK2;
        c+=(l < t);
        r[2]=l;
        t=a[3];
        t=(t+c)&BN_MASK2;
        c=(t < c);
        l=(t+b[3])&BN_MASK2;
        c+=(l < t);
        r[3]=l;
        a+=4; b+=4; r+=4; n-=4;
        }
#endif
    while(n)
        {
        t=a[0];
        t=(t+c)&BN_MASK2;
        c=(t < c);
        l=(t+b[0])&BN_MASK2;
        c+=(l < t);
        r[0]=l;
        a++; b++; r++; n--;
        }
    return((BN_ULONG)c);
    }
#endif /* !BN_LLONG */

/*
 * r[] = a[] - b[] over n words; returns the final borrow (0 or 1).
 * The borrow update is skipped when t1 == t2 because in that case the
 * previous borrow propagates unchanged through the word.
 */
BN_ULONG bn_sub_words(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b, int n)
    {
    BN_ULONG t1,t2;
    int c=0;

    assert(n >= 0);
    if (n <= 0) return((BN_ULONG)0);

#ifndef OPENSSL_SMALL_FOOTPRINT
    while (n&~3)
        {
        t1=a[0]; t2=b[0];
        r[0]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        t1=a[1]; t2=b[1];
        r[1]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        t1=a[2]; t2=b[2];
        r[2]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        t1=a[3]; t2=b[3];
        r[3]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        a+=4; b+=4; r+=4; n-=4;
        }
#endif
    while (n)
        {
        t1=a[0]; t2=b[0];
        r[0]=(t1-t2-c)&BN_MASK2;
        if (t1 != t2) c=(t1 < t2);
        a++; b++; r++; n--;
        }
    return(c);
    }

#if defined(BN_MUL_COMBA) && !defined(OPENSSL_SMALL_FOOTPRINT)

#undef bn_mul_comba8
#undef bn_mul_comba4
#undef bn_sqr_comba8
#undef bn_sqr_comba4

/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */

/*
 * Keep in mind that carrying into high part of multiplication result
 * can not overflow, because it cannot be all-ones.
 */
#ifdef BN_LLONG
#define mul_add_c(a,b,c0,c1,c2) \
    t=(BN_ULLONG)a*b; \
    t1=(BN_ULONG)Lw(t); \
    t2=(BN_ULONG)Hw(t); \
    c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define mul_add_c2(a,b,c0,c1,c2) \
    t=(BN_ULLONG)a*b; \
    tt=(t+t)&BN_MASK; \
    if (tt < t) c2++; \
    t1=(BN_ULONG)Lw(tt); \
    t2=(BN_ULONG)Hw(tt); \
    c0=(c0+t1)&BN_MASK2; \
    if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c(a,i,c0,c1,c2) \
    t=(BN_ULLONG)a[i]*a[i]; \
    t1=(BN_ULONG)Lw(t); \
    t2=(BN_ULONG)Hw(t); \
    c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c2(a,i,j,c0,c1,c2) \
    mul_add_c2((a)[i],(a)[j],c0,c1,c2)

#elif defined(BN_UMULT_LOHI)

#define mul_add_c(a,b,c0,c1,c2) { \
    BN_ULONG ta=(a),tb=(b); \
    BN_UMULT_LOHI(t1,t2,ta,tb); \
    c0 += t1; t2 += (c0<t1)?1:0; \
    c1 += t2; c2 += (c1<t2)?1:0; \
    }

#define mul_add_c2(a,b,c0,c1,c2) { \
    BN_ULONG ta=(a),tb=(b),t0; \
    BN_UMULT_LOHI(t0,t1,ta,tb); \
    c0 += t0; t2 = t1+((c0<t0)?1:0);\
    c1 += t2; c2 += (c1<t2)?1:0; \
    c0 += t0; t1 += (c0<t0)?1:0; \
    c1 += t1; c2 += (c1<t1)?1:0; \
    }

#define sqr_add_c(a,i,c0,c1,c2) { \
    BN_ULONG ta=(a)[i]; \
    BN_UMULT_LOHI(t1,t2,ta,ta); \
    c0 += t1; t2 += (c0<t1)?1:0; \
    c1 += t2; c2 += (c1<t2)?1:0; \
    }

#define sqr_add_c2(a,i,j,c0,c1,c2) \
    mul_add_c2((a)[i],(a)[j],c0,c1,c2)

#elif defined(BN_UMULT_HIGH)

#define mul_add_c(a,b,c0,c1,c2) { \
    BN_ULONG ta=(a),tb=(b); \
    t1 = ta * tb; \
    t2 = BN_UMULT_HIGH(ta,tb); \
    c0 += t1; t2 += (c0<t1)?1:0; \
    c1 += t2; c2 += (c1<t2)?1:0; \
    }

#define mul_add_c2(a,b,c0,c1,c2) { \
    BN_ULONG ta=(a),tb=(b),t0; \
    t1 = BN_UMULT_HIGH(ta,tb); \
    t0 = ta * tb; \
    c0 += t0; t2 = t1+((c0<t0)?1:0);\
    c1 += t2; c2 += (c1<t2)?1:0; \
    c0 += t0; t1 += (c0<t0)?1:0; \
    c1 += t1; c2 += (c1<t1)?1:0; \
    }

#define sqr_add_c(a,i,c0,c1,c2) { \
    BN_ULONG ta=(a)[i]; \
    t1 = ta * ta; \
    t2 = BN_UMULT_HIGH(ta,ta); \
    c0 += t1; t2 += (c0<t1)?1:0; \
    c1 += t2; c2 += (c1<t2)?1:0; \
    }

#define sqr_add_c2(a,i,j,c0,c1,c2) \
    mul_add_c2((a)[i],(a)[j],c0,c1,c2)

#else /* !BN_LLONG */
#define mul_add_c(a,b,c0,c1,c2) \
    t1=LBITS(a); t2=HBITS(a); \
    bl=LBITS(b); bh=HBITS(b); \
    mul64(t1,t2,bl,bh); \
    c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define mul_add_c2(a,b,c0,c1,c2) \
    t1=LBITS(a); t2=HBITS(a); \
    bl=LBITS(b); bh=HBITS(b); \
    mul64(t1,t2,bl,bh); \
    if (t2 & BN_TBIT) c2++; \
    t2=(t2+t2)&BN_MASK2; \
    if (t1 & BN_TBIT) t2++; \
    t1=(t1+t1)&BN_MASK2; \
    c0=(c0+t1)&BN_MASK2; \
    if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c(a,i,c0,c1,c2) \
    sqr64(t1,t2,(a)[i]); \
    c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
    c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c2(a,i,j,c0,c1,c2) \
    mul_add_c2((a)[i],(a)[j],c0,c1,c2)
#endif /* !BN_LLONG */

/*
 * r[0..15] = a[0..7] * b[0..7] by the comba (column-wise) method: each
 * output word is the sum of all partial products of equal weight,
 * accumulated in the rotating three-word carry chain (c1,c2,c3).
 */
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
#ifdef BN_LLONG
    BN_ULLONG t;
#else
    BN_ULONG bl,bh;
#endif
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    mul_add_c(a[0],b[0],c1,c2,c3);
    r[0]=c1;
    c1=0;
    mul_add_c(a[0],b[1],c2,c3,c1);
    mul_add_c(a[1],b[0],c2,c3,c1);
    r[1]=c2;
    c2=0;
    mul_add_c(a[2],b[0],c3,c1,c2);
    mul_add_c(a[1],b[1],c3,c1,c2);
    mul_add_c(a[0],b[2],c3,c1,c2);
    r[2]=c3;
    c3=0;
    mul_add_c(a[0],b[3],c1,c2,c3);
    mul_add_c(a[1],b[2],c1,c2,c3);
    mul_add_c(a[2],b[1],c1,c2,c3);
    mul_add_c(a[3],b[0],c1,c2,c3);
    r[3]=c1;
    c1=0;
    mul_add_c(a[4],b[0],c2,c3,c1);
    mul_add_c(a[3],b[1],c2,c3,c1);
    mul_add_c(a[2],b[2],c2,c3,c1);
    mul_add_c(a[1],b[3],c2,c3,c1);
    mul_add_c(a[0],b[4],c2,c3,c1);
    r[4]=c2;
    c2=0;
    mul_add_c(a[0],b[5],c3,c1,c2);
    mul_add_c(a[1],b[4],c3,c1,c2);
    mul_add_c(a[2],b[3],c3,c1,c2);
    mul_add_c(a[3],b[2],c3,c1,c2);
    mul_add_c(a[4],b[1],c3,c1,c2);
    mul_add_c(a[5],b[0],c3,c1,c2);
    r[5]=c3;
    c3=0;
    mul_add_c(a[6],b[0],c1,c2,c3);
    mul_add_c(a[5],b[1],c1,c2,c3);
    mul_add_c(a[4],b[2],c1,c2,c3);
    mul_add_c(a[3],b[3],c1,c2,c3);
    mul_add_c(a[2],b[4],c1,c2,c3);
    mul_add_c(a[1],b[5],c1,c2,c3);
    mul_add_c(a[0],b[6],c1,c2,c3);
    r[6]=c1;
    c1=0;
    mul_add_c(a[0],b[7],c2,c3,c1);
    mul_add_c(a[1],b[6],c2,c3,c1);
    mul_add_c(a[2],b[5],c2,c3,c1);
    mul_add_c(a[3],b[4],c2,c3,c1);
    mul_add_c(a[4],b[3],c2,c3,c1);
    mul_add_c(a[5],b[2],c2,c3,c1);
    mul_add_c(a[6],b[1],c2,c3,c1);
    mul_add_c(a[7],b[0],c2,c3,c1);
    r[7]=c2;
    c2=0;
    mul_add_c(a[7],b[1],c3,c1,c2);
    mul_add_c(a[6],b[2],c3,c1,c2);
    mul_add_c(a[5],b[3],c3,c1,c2);
    mul_add_c(a[4],b[4],c3,c1,c2);
    mul_add_c(a[3],b[5],c3,c1,c2);
    mul_add_c(a[2],b[6],c3,c1,c2);
    mul_add_c(a[1],b[7],c3,c1,c2);
    r[8]=c3;
    c3=0;
    mul_add_c(a[2],b[7],c1,c2,c3);
    mul_add_c(a[3],b[6],c1,c2,c3);
    mul_add_c(a[4],b[5],c1,c2,c3);
    mul_add_c(a[5],b[4],c1,c2,c3);
    mul_add_c(a[6],b[3],c1,c2,c3);
    mul_add_c(a[7],b[2],c1,c2,c3);
    r[9]=c1;
    c1=0;
    mul_add_c(a[7],b[3],c2,c3,c1);
    mul_add_c(a[6],b[4],c2,c3,c1);
    mul_add_c(a[5],b[5],c2,c3,c1);
    mul_add_c(a[4],b[6],c2,c3,c1);
    mul_add_c(a[3],b[7],c2,c3,c1);
    r[10]=c2;
    c2=0;
    mul_add_c(a[4],b[7],c3,c1,c2);
    mul_add_c(a[5],b[6],c3,c1,c2);
    mul_add_c(a[6],b[5],c3,c1,c2);
    mul_add_c(a[7],b[4],c3,c1,c2);
    r[11]=c3;
    c3=0;
    mul_add_c(a[7],b[5],c1,c2,c3);
    mul_add_c(a[6],b[6],c1,c2,c3);
    mul_add_c(a[5],b[7],c1,c2,c3);
    r[12]=c1;
    c1=0;
    mul_add_c(a[6],b[7],c2,c3,c1);
    mul_add_c(a[7],b[6],c2,c3,c1);
    r[13]=c2;
    c2=0;
    mul_add_c(a[7],b[7],c3,c1,c2);
    r[14]=c3;
    r[15]=c1;
    }

/* r[0..7] = a[0..3] * b[0..3], comba method (see bn_mul_comba8). */
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
#ifdef BN_LLONG
    BN_ULLONG t;
#else
    BN_ULONG bl,bh;
#endif
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    mul_add_c(a[0],b[0],c1,c2,c3);
    r[0]=c1;
    c1=0;
    mul_add_c(a[0],b[1],c2,c3,c1);
    mul_add_c(a[1],b[0],c2,c3,c1);
    r[1]=c2;
    c2=0;
    mul_add_c(a[2],b[0],c3,c1,c2);
    mul_add_c(a[1],b[1],c3,c1,c2);
    mul_add_c(a[0],b[2],c3,c1,c2);
    r[2]=c3;
    c3=0;
    mul_add_c(a[0],b[3],c1,c2,c3);
    mul_add_c(a[1],b[2],c1,c2,c3);
    mul_add_c(a[2],b[1],c1,c2,c3);
    mul_add_c(a[3],b[0],c1,c2,c3);
    r[3]=c1;
    c1=0;
    mul_add_c(a[3],b[1],c2,c3,c1);
    mul_add_c(a[2],b[2],c2,c3,c1);
    mul_add_c(a[1],b[3],c2,c3,c1);
    r[4]=c2;
    c2=0;
    mul_add_c(a[2],b[3],c3,c1,c2);
    mul_add_c(a[3],b[2],c3,c1,c2);
    r[5]=c3;
    c3=0;
    mul_add_c(a[3],b[3],c1,c2,c3);
    r[6]=c1;
    r[7]=c2;
    }

/*
 * r[0..15] = a[0..7]^2, comba method.  Off-diagonal products appear twice
 * and use sqr_add_c2 (which doubles); diagonal squares use sqr_add_c.
 */
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
    {
#ifdef BN_LLONG
    BN_ULLONG t,tt;
#else
    BN_ULONG bl,bh;
#endif
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    sqr_add_c(a,0,c1,c2,c3);
    r[0]=c1;
    c1=0;
    sqr_add_c2(a,1,0,c2,c3,c1);
    r[1]=c2;
    c2=0;
    sqr_add_c(a,1,c3,c1,c2);
    sqr_add_c2(a,2,0,c3,c1,c2);
    r[2]=c3;
    c3=0;
    sqr_add_c2(a,3,0,c1,c2,c3);
    sqr_add_c2(a,2,1,c1,c2,c3);
    r[3]=c1;
    c1=0;
    sqr_add_c(a,2,c2,c3,c1);
    sqr_add_c2(a,3,1,c2,c3,c1);
    sqr_add_c2(a,4,0,c2,c3,c1);
    r[4]=c2;
    c2=0;
    sqr_add_c2(a,5,0,c3,c1,c2);
    sqr_add_c2(a,4,1,c3,c1,c2);
    sqr_add_c2(a,3,2,c3,c1,c2);
    r[5]=c3;
    c3=0;
    sqr_add_c(a,3,c1,c2,c3);
    sqr_add_c2(a,4,2,c1,c2,c3);
    sqr_add_c2(a,5,1,c1,c2,c3);
    sqr_add_c2(a,6,0,c1,c2,c3);
    r[6]=c1;
    c1=0;
    sqr_add_c2(a,7,0,c2,c3,c1);
    sqr_add_c2(a,6,1,c2,c3,c1);
    sqr_add_c2(a,5,2,c2,c3,c1);
    sqr_add_c2(a,4,3,c2,c3,c1);
    r[7]=c2;
    c2=0;
    sqr_add_c(a,4,c3,c1,c2);
    sqr_add_c2(a,5,3,c3,c1,c2);
    sqr_add_c2(a,6,2,c3,c1,c2);
    sqr_add_c2(a,7,1,c3,c1,c2);
    r[8]=c3;
    c3=0;
    sqr_add_c2(a,7,2,c1,c2,c3);
    sqr_add_c2(a,6,3,c1,c2,c3);
    sqr_add_c2(a,5,4,c1,c2,c3);
    r[9]=c1;
    c1=0;
    sqr_add_c(a,5,c2,c3,c1);
    sqr_add_c2(a,6,4,c2,c3,c1);
    sqr_add_c2(a,7,3,c2,c3,c1);
    r[10]=c2;
    c2=0;
    sqr_add_c2(a,7,4,c3,c1,c2);
    sqr_add_c2(a,6,5,c3,c1,c2);
    r[11]=c3;
    c3=0;
    sqr_add_c(a,6,c1,c2,c3);
    sqr_add_c2(a,7,5,c1,c2,c3);
    r[12]=c1;
    c1=0;
    sqr_add_c2(a,7,6,c2,c3,c1);
    r[13]=c2;
    c2=0;
    sqr_add_c(a,7,c3,c1,c2);
    r[14]=c3;
    r[15]=c1;
    }

/* r[0..7] = a[0..3]^2, comba method (see bn_sqr_comba8). */
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
    {
#ifdef BN_LLONG
    BN_ULLONG t,tt;
#else
    BN_ULONG bl,bh;
#endif
    BN_ULONG t1,t2;
    BN_ULONG c1,c2,c3;

    c1=0;
    c2=0;
    c3=0;
    sqr_add_c(a,0,c1,c2,c3);
    r[0]=c1;
    c1=0;
    sqr_add_c2(a,1,0,c2,c3,c1);
    r[1]=c2;
    c2=0;
    sqr_add_c(a,1,c3,c1,c2);
    sqr_add_c2(a,2,0,c3,c1,c2);
    r[2]=c3;
    c3=0;
    sqr_add_c2(a,3,0,c1,c2,c3);
    sqr_add_c2(a,2,1,c1,c2,c3);
    r[3]=c1;
    c1=0;
    sqr_add_c(a,2,c2,c3,c1);
    sqr_add_c2(a,3,1,c2,c3,c1);
    r[4]=c2;
    c2=0;
    sqr_add_c2(a,3,2,c3,c1,c2);
    r[5]=c3;
    c3=0;
    sqr_add_c(a,3,c1,c2,c3);
    r[6]=c1;
    r[7]=c2;
    }

#ifdef OPENSSL_NO_ASM
#ifdef OPENSSL_BN_ASM_MONT
#include <alloca.h>
/*
 * This is essentially reference implementation, which may or may not
 * result in performance improvement. E.g. on IA-32 this routine was
 * observed to give 40% faster rsa1024 private key operations and 10%
 * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
 * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
 * reference implementation, one to be used as starting point for
 * platform-specific assembler. Mentioned numbers apply to compiler
 * generated code compiled with and without -DOPENSSL_BN_ASM_MONT and
 * can vary not only from platform to platform, but even for compiler
 * versions. Assembler vs. assembler improvement coefficients can
 * [and are known to] differ and are to be documented elsewhere.
 */
/*
 * Word-by-word Montgomery multiplication: rp = ap * bp * R^-1 mod np,
 * where n0p points at the precomputed -np^-1 mod 2^BN_BITS2.  The
 * temporary tp lives on the stack (alloca) and is accessed through the
 * volatile alias vp so the zeroization at the end is not optimized away.
 * Note the `goto enter` deliberately jumps into the middle of the i-loop
 * to share the reduction code with the unrolled first iteration.
 */
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
    {
    BN_ULONG c0,c1,ml,*tp,n0;
#ifdef mul64
    BN_ULONG mh;
#endif
    volatile BN_ULONG *vp;
    int i=0,j;

#if 0 /* template for platform-specific implementation */
    if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
#endif
    vp = tp = alloca((num+2)*sizeof(BN_ULONG));

    n0 = *n0p;

    /* first iteration unrolled: tp = ap * bp[0] */
    c0 = 0;
    ml = bp[0];
#ifdef mul64
    mh = HBITS(ml);
    ml = LBITS(ml);
    for (j=0;j<num;++j)
        mul(tp[j],ap[j],ml,mh,c0);
#else
    for (j=0;j<num;++j)
        mul(tp[j],ap[j],ml,c0);
#endif

    tp[num] = c0;
    tp[num+1] = 0;
    goto enter;

    for(i=0;i<num;i++)
        {
        /* tp += ap * bp[i] */
        c0 = 0;
        ml = bp[i];
#ifdef mul64
        mh = HBITS(ml);
        ml = LBITS(ml);
        for (j=0;j<num;++j)
            mul_add(tp[j],ap[j],ml,mh,c0);
#else
        for (j=0;j<num;++j)
            mul_add(tp[j],ap[j],ml,c0);
#endif
        c1 = (tp[num] + c0)&BN_MASK2;
        tp[num] = c1;
        tp[num+1] = (c1<c0?1:0);
    enter:
        /* Montgomery reduction step: add (tp[0]*n0 mod 2^w) * np, shift */
        c1 = tp[0];
        ml = (c1*n0)&BN_MASK2;
        c0 = 0;
#ifdef mul64
        mh = HBITS(ml);
        ml = LBITS(ml);
        mul_add(c1,np[0],ml,mh,c0);
#else
        mul_add(c1,ml,np[0],c0);
#endif
        for(j=1;j<num;j++)
            {
            c1 = tp[j];
#ifdef mul64
            mul_add(c1,np[j],ml,mh,c0);
#else
            mul_add(c1,ml,np[j],c0);
#endif
            tp[j-1] = c1&BN_MASK2;
            }
        c1 = (tp[num] + c0)&BN_MASK2;
        tp[num-1] = c1;
        tp[num] = tp[num+1] + (c1<c0?1:0);
        }

    /* final conditional subtraction of np, then wipe the temporary */
    if (tp[num]!=0 || tp[num-1]>=np[num-1])
        {
        c0 = bn_sub_words(rp,tp,np,num);
        if (tp[num]!=0 || c0==0)
            {
            for(i=0;i<num+2;i++) vp[i] = 0;
            return 1;
            }
        }
    for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
    vp[num] = 0;
    vp[num+1] = 0;
    return 1;
    }
#else
/*
 * Return value of 0 indicates that multiplication/convolution was not
 * performed to signal the caller to fall down to alternative/original
 * code-path.
 */
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
{ return 0; }
#endif /* OPENSSL_BN_ASM_MONT */
#endif

#else /* !BN_MUL_COMBA */

/* hmm... is it faster just to do a multiply? */
/* Non-comba fallbacks built from the generic word routines above. */
#undef bn_sqr_comba4
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
    {
    BN_ULONG t[8];
    bn_sqr_normal(r,a,4,t);
    }

#undef bn_sqr_comba8
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
    {
    BN_ULONG t[16];
    bn_sqr_normal(r,a,8,t);
    }

/* 4x4 schoolbook multiply: one bn_mul_words row plus three accumulating rows. */
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
    r[4]=bn_mul_words(    &(r[0]),a,4,b[0]);
    r[5]=bn_mul_add_words(&(r[1]),a,4,b[1]);
    r[6]=bn_mul_add_words(&(r[2]),a,4,b[2]);
    r[7]=bn_mul_add_words(&(r[3]),a,4,b[3]);
    }

/* 8x8 schoolbook multiply, same row-by-row scheme as bn_mul_comba4. */
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
    {
    r[ 8]=bn_mul_words(    &(r[0]),a,8,b[0]);
    r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
    r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
    r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
    r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
    r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
    r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
    r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
    }

#ifdef OPENSSL_NO_ASM
#ifdef OPENSSL_BN_ASM_MONT
#include <alloca.h>
/*
 * Montgomery multiplication built on bn_mul_add_words; the word shift
 * after each reduction step is done by moving tp[] down one slot.
 * See the comba-path bn_mul_mont above for the algorithm and the
 * volatile-alias zeroization idiom.
 */
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
    {
    BN_ULONG c0,c1,*tp,n0=*n0p;
    volatile BN_ULONG *vp;
    int i=0,j;

    vp = tp = alloca((num+2)*sizeof(BN_ULONG));

    for(i=0;i<=num;i++) tp[i]=0;

    for(i=0;i<num;i++)
        {
        c0 = bn_mul_add_words(tp,ap,num,bp[i]);
        c1 = (tp[num] + c0)&BN_MASK2;
        tp[num] = c1;
        tp[num+1] = (c1<c0?1:0);

        c0 = bn_mul_add_words(tp,np,num,tp[0]*n0);
        c1 = (tp[num] + c0)&BN_MASK2;
        tp[num] = c1;
        tp[num+1] += (c1<c0?1:0);
        for(j=0;j<=num;j++) tp[j]=tp[j+1];
        }

    if (tp[num]!=0 || tp[num-1]>=np[num-1])
        {
        c0 = bn_sub_words(rp,tp,np,num);
        if (tp[num]!=0 || c0==0)
            {
            for(i=0;i<num+2;i++) vp[i] = 0;
            return 1;
            }
        }
    for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
    vp[num] = 0;
    vp[num+1] = 0;
    return 1;
    }
#else
/* Stub: returning 0 tells the caller to use the generic BN code path. */
int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
{ return 0; }
#endif /* OPENSSL_BN_ASM_MONT */
#endif

#endif /* !BN_MUL_COMBA */