/*
 * Copyright (c) 2000-2001,2011,2014 Apple Inc. All Rights Reserved.
 *
 * The contents of this file constitute Original Code as defined in and are
 * subject to the Apple Public Source License Version 1.2 (the 'License').
 * You may not use this file except in compliance with the License. Please obtain
 * a copy of the License at http://www.apple.com/publicsource and read it before
 * using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS
 * OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, INCLUDING WITHOUT
 * LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
 * PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. Please see the License for the
 * specific language governing rights and limitations under the License.
 */


/* crypto/bn/bn_asm.c */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#ifndef BN_DEBUG
# undef NDEBUG /* avoid conflicting definitions */
# define NDEBUG
#endif

#include <stdio.h>
#include <assert.h>
#include "cryptlib.h"
#include "bn_lcl.h"

#if defined(BN_LLONG) || defined(BN_UMULT_HIGH)

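/* rp[i] += ap[i]*w for 0 <= i < num; the carry out of the most significant
 * word is returned.  The loop body is unrolled four words at a time. */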
BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG c1=0;

	assert(num >= 0);
	if (num <= 0) return(c1);

	while (num&~3)
		{
		mul_add(rp[0],ap[0],w,c1);
		mul_add(rp[1],ap[1],w,c1);
		mul_add(rp[2],ap[2],w,c1);
		mul_add(rp[3],ap[3],w,c1);
		ap+=4; rp+=4; num-=4;
		}
	if (num)
		{
		mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
		mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
		mul_add(rp[2],ap[2],w,c1); return c1;
		}

	return(c1);
	}

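/* rp[i] = ap[i]*w plus the carry propagated from the previous word; the
 * final carry is returned.  Unlike bn_mul_add_words(), the old contents of
 * rp[] are overwritten rather than accumulated. */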
BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG c1=0;

	assert(num >= 0);
	if (num <= 0) return(c1);

	while (num&~3)
		{
		mul(rp[0],ap[0],w,c1);
		mul(rp[1],ap[1],w,c1);
		mul(rp[2],ap[2],w,c1);
		mul(rp[3],ap[3],w,c1);
		ap+=4; rp+=4; num-=4;
		}
	if (num)
		{
		mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
		mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
		mul(rp[2],ap[2],w,c1);
		}
	return(c1);
	}

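/* r[2*i] and r[2*i+1] receive the low and high words of a[i]^2, so the
 * result array must hold 2*n words. */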
void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
        {
	assert(n >= 0);
	if (n <= 0) return;
	while (n&~3)
		{
		sqr(r[0],r[1],a[0]);
		sqr(r[2],r[3],a[1]);
		sqr(r[4],r[5],a[2]);
		sqr(r[6],r[7],a[3]);
		a+=4; r+=8; n-=4;
		}
	if (n)
		{
		sqr(r[0],r[1],a[0]); if (--n == 0) return;
		sqr(r[2],r[3],a[1]); if (--n == 0) return;
		sqr(r[4],r[5],a[2]);
		}
	}

#else /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

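/* Portable versions for platforms without a double-width integer type or a
 * high-half multiply: w is split into half-words with LBITS()/HBITS() and the
 * mul()/mul_add()/sqr64() helper macros (see bn_lcl.h) do the carry
 * bookkeeping on half-word pieces. */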
BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG c=0;
	BN_ULONG bl,bh;

	assert(num >= 0);
	if (num <= 0) return((BN_ULONG)0);

	bl=LBITS(w);
	bh=HBITS(w);

	for (;;)
		{
		mul_add(rp[0],ap[0],bl,bh,c);
		if (--num == 0) break;
		mul_add(rp[1],ap[1],bl,bh,c);
		if (--num == 0) break;
		mul_add(rp[2],ap[2],bl,bh,c);
		if (--num == 0) break;
		mul_add(rp[3],ap[3],bl,bh,c);
		if (--num == 0) break;
		ap+=4;
		rp+=4;
		}
	return(c);
	}

BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
	{
	BN_ULONG carry=0;
	BN_ULONG bl,bh;

	assert(num >= 0);
	if (num <= 0) return((BN_ULONG)0);

	bl=LBITS(w);
	bh=HBITS(w);

	for (;;)
		{
		mul(rp[0],ap[0],bl,bh,carry);
		if (--num == 0) break;
		mul(rp[1],ap[1],bl,bh,carry);
		if (--num == 0) break;
		mul(rp[2],ap[2],bl,bh,carry);
		if (--num == 0) break;
		mul(rp[3],ap[3],bl,bh,carry);
		if (--num == 0) break;
		ap+=4;
		rp+=4;
		}
	return(carry);
	}

void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
        {
	assert(n >= 0);
	if (n <= 0) return;
	for (;;)
		{
		sqr64(r[0],r[1],a[0]);
		if (--n == 0) break;

		sqr64(r[2],r[3],a[1]);
		if (--n == 0) break;

		sqr64(r[4],r[5],a[2]);
		if (--n == 0) break;

		sqr64(r[6],r[7],a[3]);
		if (--n == 0) break;

		a+=4;
		r+=8;
		}
	}

#endif /* !(defined(BN_LLONG) || defined(BN_UMULT_HIGH)) */

#if defined(BN_LLONG) && defined(BN_DIV2W)

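/* With a native double-width type the two-word dividend fits in a BN_ULLONG,
 * so the division can be done directly. */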
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
	{
	return((BN_ULONG)(((((BN_ULLONG)h)<<BN_BITS2)|l)/(BN_ULLONG)d));
	}

#else

/* Divide the two-word value (h,l) by d and return the quotient. */
/* I need to test this some more :-( */
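/* The algorithm is classical long division in base 2^BN_BITS4: d is shifted
 * left until its top bit is set, (h,l) is shifted by the same amount, and two
 * quotient "digits" of BN_BITS4 bits each are estimated from the top half of
 * the divisor and corrected downwards until the partial remainder fits. */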
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
	{
	BN_ULONG dh,dl,q,ret=0,th,tl,t;
	int i,count=2;

	if (d == 0) return(BN_MASK2);

	i=BN_num_bits_word(d);
	if ((i != BN_BITS2) && (h > (BN_ULONG)1<<i))
		{
#if !defined(NO_STDIO) && !defined(WIN16)
		fprintf(stderr,"Division would overflow (%d)\n",i);
#endif
		abort();
		}
	i=BN_BITS2-i;
	if (h >= d) h-=d;

	if (i)
		{
		d<<=i;
		h=(h<<i)|(l>>(BN_BITS2-i));
		l<<=i;
		}
	dh=(d&BN_MASK2h)>>BN_BITS4;
	dl=(d&BN_MASK2l);
	for (;;)
		{
		if ((h>>BN_BITS4) == dh)
			q=BN_MASK2l;
		else
			q=h/dh;

		th=q*dh;
		tl=dl*q;
		for (;;)
			{
			t=h-th;
			if ((t&BN_MASK2h) ||
				((tl) <= (
					(t<<BN_BITS4)|
					((l&BN_MASK2h)>>BN_BITS4))))
				break;
			q--;
			th-=dh;
			tl-=dl;
			}
		t=(tl>>BN_BITS4);
		tl=(tl<<BN_BITS4)&BN_MASK2h;
		th+=t;

		if (l < tl) th++;
		l-=tl;
		if (h < th)
			{
			h+=d;
			q--;
			}
		h-=th;

		if (--count == 0) break;

		ret=q<<BN_BITS4;
		h=((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2;
		l=(l&BN_MASK2l)<<BN_BITS4;
		}
	ret|=q;
	return(ret);
	}
#endif /* !(defined(BN_LLONG) && defined(BN_DIV2W)) */

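/* r[] = a[] + b[] over n words; the carry out of the most significant word
 * is returned. */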
#ifdef BN_LLONG
BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
        {
	BN_ULLONG ll=0;

	assert(n >= 0);
	if (n <= 0) return((BN_ULONG)0);

	for (;;)
		{
		ll+=(BN_ULLONG)a[0]+b[0];
		r[0]=(BN_ULONG)ll&BN_MASK2;
		ll>>=BN_BITS2;
		if (--n <= 0) break;

		ll+=(BN_ULLONG)a[1]+b[1];
		r[1]=(BN_ULONG)ll&BN_MASK2;
		ll>>=BN_BITS2;
		if (--n <= 0) break;

		ll+=(BN_ULLONG)a[2]+b[2];
		r[2]=(BN_ULONG)ll&BN_MASK2;
		ll>>=BN_BITS2;
		if (--n <= 0) break;

		ll+=(BN_ULLONG)a[3]+b[3];
		r[3]=(BN_ULONG)ll&BN_MASK2;
		ll>>=BN_BITS2;
		if (--n <= 0) break;

		a+=4;
		b+=4;
		r+=4;
		}
	return((BN_ULONG)ll);
	}
#else /* !BN_LLONG */
BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
        {
	BN_ULONG c,l,t;

	assert(n >= 0);
	if (n <= 0) return((BN_ULONG)0);

	c=0;
	for (;;)
		{
		t=a[0];
		t=(t+c)&BN_MASK2;
		c=(t < c);
		l=(t+b[0])&BN_MASK2;
		c+=(l < t);
		r[0]=l;
		if (--n <= 0) break;

		t=a[1];
		t=(t+c)&BN_MASK2;
		c=(t < c);
		l=(t+b[1])&BN_MASK2;
		c+=(l < t);
		r[1]=l;
		if (--n <= 0) break;

		t=a[2];
		t=(t+c)&BN_MASK2;
		c=(t < c);
		l=(t+b[2])&BN_MASK2;
		c+=(l < t);
		r[2]=l;
		if (--n <= 0) break;

		t=a[3];
		t=(t+c)&BN_MASK2;
		c=(t < c);
		l=(t+b[3])&BN_MASK2;
		c+=(l < t);
		r[3]=l;
		if (--n <= 0) break;

		a+=4;
		b+=4;
		r+=4;
		}
	return((BN_ULONG)c);
	}
#endif /* !BN_LLONG */

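/* r[] = a[] - b[] over n words; the return value is the borrow out of the
 * most significant word (0 or 1). */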
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
        {
	BN_ULONG t1,t2;
	int c=0;

	assert(n >= 0);
	if (n <= 0) return((BN_ULONG)0);

	for (;;)
		{
		t1=a[0]; t2=b[0];
		r[0]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[1]; t2=b[1];
		r[1]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[2]; t2=b[2];
		r[2]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		t1=a[3]; t2=b[3];
		r[3]=(t1-t2-c)&BN_MASK2;
		if (t1 != t2) c=(t1 < t2);
		if (--n <= 0) break;

		a+=4;
		b+=4;
		r+=4;
		}
	return(c);
	}

#ifdef BN_MUL_COMBA

#undef bn_mul_comba8
#undef bn_mul_comba4
#undef bn_sqr_comba8
#undef bn_sqr_comba4

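/* The #undefs ensure the comba routines below are defined as plain functions,
 * in case the names were defined as macros elsewhere. */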
/* mul_add_c(a,b,c0,c1,c2)    -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2)   -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)    -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
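/*
 * All four macros expect the caller to declare the scratch variables they
 * use (t/tt or t1/t2, plus bl/bh in the non-LLONG case) together with the
 * three-word accumulator (c0,c1,c2).  As an illustrative sketch, one column
 * of a comba product -- the coefficient of B^2 (B = 2^BN_BITS2) in
 * bn_mul_comba4() below -- is accumulated as
 *
 *	mul_add_c(a[2],b[0],c3,c1,c2);
 *	mul_add_c(a[1],b[1],c3,c1,c2);
 *	mul_add_c(a[0],b[2],c3,c1,c2);
 *	r[2]=c3;
 *	c3=0;
 *
 * with the carries left in c1,c2 rolling forward into the next column.
 */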

#ifdef BN_LLONG
#define mul_add_c(a,b,c0,c1,c2) \
	t=(BN_ULLONG)a*b; \
	t1=(BN_ULONG)Lw(t); \
	t2=(BN_ULONG)Hw(t); \
	c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define mul_add_c2(a,b,c0,c1,c2) \
	t=(BN_ULLONG)a*b; \
	tt=(t+t)&BN_MASK; \
	if (tt < t) c2++; \
	t1=(BN_ULONG)Lw(tt); \
	t2=(BN_ULONG)Hw(tt); \
	c0=(c0+t1)&BN_MASK2; \
	if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c(a,i,c0,c1,c2) \
	t=(BN_ULLONG)a[i]*a[i]; \
	t1=(BN_ULONG)Lw(t); \
	t2=(BN_ULONG)Hw(t); \
	c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c2(a,i,j,c0,c1,c2) \
	mul_add_c2((a)[i],(a)[j],c0,c1,c2)

#elif defined(BN_UMULT_HIGH)

#define mul_add_c(a,b,c0,c1,c2)	{	\
	BN_ULONG ta=(a),tb=(b);		\
	t1 = ta * tb;			\
	t2 = BN_UMULT_HIGH(ta,tb);	\
	c0 += t1; t2 += (c0<t1)?1:0;	\
	c1 += t2; c2 += (c1<t2)?1:0;	\
	}

#define mul_add_c2(a,b,c0,c1,c2) {	\
	BN_ULONG ta=(a),tb=(b),t0;	\
	t1 = BN_UMULT_HIGH(ta,tb);	\
	t0 = ta * tb;			\
	t2 = t1+t1; c2 += (t2<t1)?1:0;	\
	t1 = t0+t0; t2 += (t1<t0)?1:0;	\
	c0 += t1; t2 += (c0<t1)?1:0;	\
	c1 += t2; c2 += (c1<t2)?1:0;	\
	}

#define sqr_add_c(a,i,c0,c1,c2)	{	\
	BN_ULONG ta=(a)[i];		\
	t1 = ta * ta;			\
	t2 = BN_UMULT_HIGH(ta,ta);	\
	c0 += t1; t2 += (c0<t1)?1:0;	\
	c1 += t2; c2 += (c1<t2)?1:0;	\
	}

#define sqr_add_c2(a,i,j,c0,c1,c2)	\
	mul_add_c2((a)[i],(a)[j],c0,c1,c2)

#else /* !BN_LLONG */
#define mul_add_c(a,b,c0,c1,c2) \
	t1=LBITS(a); t2=HBITS(a); \
	bl=LBITS(b); bh=HBITS(b); \
	mul64(t1,t2,bl,bh); \
	c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define mul_add_c2(a,b,c0,c1,c2) \
	t1=LBITS(a); t2=HBITS(a); \
	bl=LBITS(b); bh=HBITS(b); \
	mul64(t1,t2,bl,bh); \
	if (t2 & BN_TBIT) c2++; \
	t2=(t2+t2)&BN_MASK2; \
	if (t1 & BN_TBIT) t2++; \
	t1=(t1+t1)&BN_MASK2; \
	c0=(c0+t1)&BN_MASK2; \
	if ((c0 < t1) && (((++t2)&BN_MASK2) == 0)) c2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c(a,i,c0,c1,c2) \
	sqr64(t1,t2,(a)[i]); \
	c0=(c0+t1)&BN_MASK2; if ((c0) < t1) t2++; \
	c1=(c1+t2)&BN_MASK2; if ((c1) < t2) c2++;

#define sqr_add_c2(a,i,j,c0,c1,c2) \
	mul_add_c2((a)[i],(a)[j],c0,c1,c2)
#endif /* !BN_LLONG */

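/* Comba 8x8 multiplication: the full 16-word product is built column by
 * column, each column summing the partial products a[i]*b[j] with i+j equal
 * to the column index. */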
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
#ifdef BN_LLONG
	BN_ULLONG t;
#else
	BN_ULONG bl,bh;
#endif
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	mul_add_c(a[0],b[0],c1,c2,c3);
	r[0]=c1;
	c1=0;
	mul_add_c(a[0],b[1],c2,c3,c1);
	mul_add_c(a[1],b[0],c2,c3,c1);
	r[1]=c2;
	c2=0;
	mul_add_c(a[2],b[0],c3,c1,c2);
	mul_add_c(a[1],b[1],c3,c1,c2);
	mul_add_c(a[0],b[2],c3,c1,c2);
	r[2]=c3;
	c3=0;
	mul_add_c(a[0],b[3],c1,c2,c3);
	mul_add_c(a[1],b[2],c1,c2,c3);
	mul_add_c(a[2],b[1],c1,c2,c3);
	mul_add_c(a[3],b[0],c1,c2,c3);
	r[3]=c1;
	c1=0;
	mul_add_c(a[4],b[0],c2,c3,c1);
	mul_add_c(a[3],b[1],c2,c3,c1);
	mul_add_c(a[2],b[2],c2,c3,c1);
	mul_add_c(a[1],b[3],c2,c3,c1);
	mul_add_c(a[0],b[4],c2,c3,c1);
	r[4]=c2;
	c2=0;
	mul_add_c(a[0],b[5],c3,c1,c2);
	mul_add_c(a[1],b[4],c3,c1,c2);
	mul_add_c(a[2],b[3],c3,c1,c2);
	mul_add_c(a[3],b[2],c3,c1,c2);
	mul_add_c(a[4],b[1],c3,c1,c2);
	mul_add_c(a[5],b[0],c3,c1,c2);
	r[5]=c3;
	c3=0;
	mul_add_c(a[6],b[0],c1,c2,c3);
	mul_add_c(a[5],b[1],c1,c2,c3);
	mul_add_c(a[4],b[2],c1,c2,c3);
	mul_add_c(a[3],b[3],c1,c2,c3);
	mul_add_c(a[2],b[4],c1,c2,c3);
	mul_add_c(a[1],b[5],c1,c2,c3);
	mul_add_c(a[0],b[6],c1,c2,c3);
	r[6]=c1;
	c1=0;
	mul_add_c(a[0],b[7],c2,c3,c1);
	mul_add_c(a[1],b[6],c2,c3,c1);
	mul_add_c(a[2],b[5],c2,c3,c1);
	mul_add_c(a[3],b[4],c2,c3,c1);
	mul_add_c(a[4],b[3],c2,c3,c1);
	mul_add_c(a[5],b[2],c2,c3,c1);
	mul_add_c(a[6],b[1],c2,c3,c1);
	mul_add_c(a[7],b[0],c2,c3,c1);
	r[7]=c2;
	c2=0;
	mul_add_c(a[7],b[1],c3,c1,c2);
	mul_add_c(a[6],b[2],c3,c1,c2);
	mul_add_c(a[5],b[3],c3,c1,c2);
	mul_add_c(a[4],b[4],c3,c1,c2);
	mul_add_c(a[3],b[5],c3,c1,c2);
	mul_add_c(a[2],b[6],c3,c1,c2);
	mul_add_c(a[1],b[7],c3,c1,c2);
	r[8]=c3;
	c3=0;
	mul_add_c(a[2],b[7],c1,c2,c3);
	mul_add_c(a[3],b[6],c1,c2,c3);
	mul_add_c(a[4],b[5],c1,c2,c3);
	mul_add_c(a[5],b[4],c1,c2,c3);
	mul_add_c(a[6],b[3],c1,c2,c3);
	mul_add_c(a[7],b[2],c1,c2,c3);
	r[9]=c1;
	c1=0;
	mul_add_c(a[7],b[3],c2,c3,c1);
	mul_add_c(a[6],b[4],c2,c3,c1);
	mul_add_c(a[5],b[5],c2,c3,c1);
	mul_add_c(a[4],b[6],c2,c3,c1);
	mul_add_c(a[3],b[7],c2,c3,c1);
	r[10]=c2;
	c2=0;
	mul_add_c(a[4],b[7],c3,c1,c2);
	mul_add_c(a[5],b[6],c3,c1,c2);
	mul_add_c(a[6],b[5],c3,c1,c2);
	mul_add_c(a[7],b[4],c3,c1,c2);
	r[11]=c3;
	c3=0;
	mul_add_c(a[7],b[5],c1,c2,c3);
	mul_add_c(a[6],b[6],c1,c2,c3);
	mul_add_c(a[5],b[7],c1,c2,c3);
	r[12]=c1;
	c1=0;
	mul_add_c(a[6],b[7],c2,c3,c1);
	mul_add_c(a[7],b[6],c2,c3,c1);
	r[13]=c2;
	c2=0;
	mul_add_c(a[7],b[7],c3,c1,c2);
	r[14]=c3;
	r[15]=c1;
	}

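/* The 4x4 variant of the same column scheme, producing an 8-word result. */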
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
#ifdef BN_LLONG
	BN_ULLONG t;
#else
	BN_ULONG bl,bh;
#endif
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	mul_add_c(a[0],b[0],c1,c2,c3);
	r[0]=c1;
	c1=0;
	mul_add_c(a[0],b[1],c2,c3,c1);
	mul_add_c(a[1],b[0],c2,c3,c1);
	r[1]=c2;
	c2=0;
	mul_add_c(a[2],b[0],c3,c1,c2);
	mul_add_c(a[1],b[1],c3,c1,c2);
	mul_add_c(a[0],b[2],c3,c1,c2);
	r[2]=c3;
	c3=0;
	mul_add_c(a[0],b[3],c1,c2,c3);
	mul_add_c(a[1],b[2],c1,c2,c3);
	mul_add_c(a[2],b[1],c1,c2,c3);
	mul_add_c(a[3],b[0],c1,c2,c3);
	r[3]=c1;
	c1=0;
	mul_add_c(a[3],b[1],c2,c3,c1);
	mul_add_c(a[2],b[2],c2,c3,c1);
	mul_add_c(a[1],b[3],c2,c3,c1);
	r[4]=c2;
	c2=0;
	mul_add_c(a[2],b[3],c3,c1,c2);
	mul_add_c(a[3],b[2],c3,c1,c2);
	r[5]=c3;
	c3=0;
	mul_add_c(a[3],b[3],c1,c2,c3);
	r[6]=c1;
	r[7]=c2;
	}

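/* Comba squaring: the symmetric cross terms a[i]*a[j] (i != j) are added
 * doubled via sqr_add_c2(), while the diagonal terms a[i]^2 use sqr_add_c(). */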
void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
	{
#ifdef BN_LLONG
	BN_ULLONG t,tt;
#else
	BN_ULONG bl,bh;
#endif
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	sqr_add_c(a,0,c1,c2,c3);
	r[0]=c1;
	c1=0;
	sqr_add_c2(a,1,0,c2,c3,c1);
	r[1]=c2;
	c2=0;
	sqr_add_c(a,1,c3,c1,c2);
	sqr_add_c2(a,2,0,c3,c1,c2);
	r[2]=c3;
	c3=0;
	sqr_add_c2(a,3,0,c1,c2,c3);
	sqr_add_c2(a,2,1,c1,c2,c3);
	r[3]=c1;
	c1=0;
	sqr_add_c(a,2,c2,c3,c1);
	sqr_add_c2(a,3,1,c2,c3,c1);
	sqr_add_c2(a,4,0,c2,c3,c1);
	r[4]=c2;
	c2=0;
	sqr_add_c2(a,5,0,c3,c1,c2);
	sqr_add_c2(a,4,1,c3,c1,c2);
	sqr_add_c2(a,3,2,c3,c1,c2);
	r[5]=c3;
	c3=0;
	sqr_add_c(a,3,c1,c2,c3);
	sqr_add_c2(a,4,2,c1,c2,c3);
	sqr_add_c2(a,5,1,c1,c2,c3);
	sqr_add_c2(a,6,0,c1,c2,c3);
	r[6]=c1;
	c1=0;
	sqr_add_c2(a,7,0,c2,c3,c1);
	sqr_add_c2(a,6,1,c2,c3,c1);
	sqr_add_c2(a,5,2,c2,c3,c1);
	sqr_add_c2(a,4,3,c2,c3,c1);
	r[7]=c2;
	c2=0;
	sqr_add_c(a,4,c3,c1,c2);
	sqr_add_c2(a,5,3,c3,c1,c2);
	sqr_add_c2(a,6,2,c3,c1,c2);
	sqr_add_c2(a,7,1,c3,c1,c2);
	r[8]=c3;
	c3=0;
	sqr_add_c2(a,7,2,c1,c2,c3);
	sqr_add_c2(a,6,3,c1,c2,c3);
	sqr_add_c2(a,5,4,c1,c2,c3);
	r[9]=c1;
	c1=0;
	sqr_add_c(a,5,c2,c3,c1);
	sqr_add_c2(a,6,4,c2,c3,c1);
	sqr_add_c2(a,7,3,c2,c3,c1);
	r[10]=c2;
	c2=0;
	sqr_add_c2(a,7,4,c3,c1,c2);
	sqr_add_c2(a,6,5,c3,c1,c2);
	r[11]=c3;
	c3=0;
	sqr_add_c(a,6,c1,c2,c3);
	sqr_add_c2(a,7,5,c1,c2,c3);
	r[12]=c1;
	c1=0;
	sqr_add_c2(a,7,6,c2,c3,c1);
	r[13]=c2;
	c2=0;
	sqr_add_c(a,7,c3,c1,c2);
	r[14]=c3;
	r[15]=c1;
	}

void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
	{
#ifdef BN_LLONG
	BN_ULLONG t,tt;
#else
	BN_ULONG bl,bh;
#endif
	BN_ULONG t1,t2;
	BN_ULONG c1,c2,c3;

	c1=0;
	c2=0;
	c3=0;
	sqr_add_c(a,0,c1,c2,c3);
	r[0]=c1;
	c1=0;
	sqr_add_c2(a,1,0,c2,c3,c1);
	r[1]=c2;
	c2=0;
	sqr_add_c(a,1,c3,c1,c2);
	sqr_add_c2(a,2,0,c3,c1,c2);
	r[2]=c3;
	c3=0;
	sqr_add_c2(a,3,0,c1,c2,c3);
	sqr_add_c2(a,2,1,c1,c2,c3);
	r[3]=c1;
	c1=0;
	sqr_add_c(a,2,c2,c3,c1);
	sqr_add_c2(a,3,1,c2,c3,c1);
	r[4]=c2;
	c2=0;
	sqr_add_c2(a,3,2,c3,c1,c2);
	r[5]=c3;
	c3=0;
	sqr_add_c(a,3,c1,c2,c3);
	r[6]=c1;
	r[7]=c2;
	}
#else /* !BN_MUL_COMBA */

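/* Without BN_MUL_COMBA the comba entry points fall back to the generic word
 * routines: squaring goes through bn_sqr_normal() and multiplication is built
 * from bn_mul_words()/bn_mul_add_words(). */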
/* hmm... is it faster just to do a multiply? */
#undef bn_sqr_comba4
void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
	{
	BN_ULONG t[8];
	bn_sqr_normal(r,a,4,t);
	}

#undef bn_sqr_comba8
void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
	{
	BN_ULONG t[16];
	bn_sqr_normal(r,a,8,t);
	}

void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
	r[4]=bn_mul_words(    &(r[0]),a,4,b[0]);
	r[5]=bn_mul_add_words(&(r[1]),a,4,b[1]);
	r[6]=bn_mul_add_words(&(r[2]),a,4,b[2]);
	r[7]=bn_mul_add_words(&(r[3]),a,4,b[3]);
	}

void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
	{
	r[ 8]=bn_mul_words(    &(r[0]),a,8,b[0]);
	r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
	r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
	r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
	r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
	r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
	r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
	r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
	}

#endif /* !BN_MUL_COMBA */
