#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# The eternal question is: what's wrong with compiler-generated code?
# The trick here is that the number of shifts required to perform
# rotations can be reduced by maintaining a copy of the 32-bit value
# in the upper bits of a 64-bit register. Just follow the mux2 and
# shrp instructions... Performance under a big-endian OS such as
# HP-UX is 179MBps*1GHz, which is >50% better than HP C and >2x
# better than gcc.
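#
# As a reference model, the identity behind that mux2/shrp pairing
# can be written in a few lines of Perl (a sketch for auditing only,
# never emitted into the assembly and never called; it assumes a
# 64-bit perl, and "rot32" is a name invented here):

sub rot32 {				# ROTATE(x,n): rotate 32-bit x left by n
	my ($x,$n)=@_;
	my $r=$x<<32|$x;		# replicate x in both halves, like mux2 ...,0x44
	return ($r>>(32-$n))&0xffffffff; # one funnel shift, like shrp r=x,x,32-n
}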

$code=<<___;
.ident  \"sha1-ia64.s, version 1.2\"
.ident  \"IA-64 ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>\"
.explicit

___


if ($^O eq "hpux") {
    $ADDP="addp4";
    for (@ARGV) { $ADDP="add" if (/\+DD64/ || /\-mlp64/); }
} else { $ADDP="add"; }
for (@ARGV) {	$big_endian=1 if (/\-DB_ENDIAN/);
		$big_endian=0 if (/\-DL_ENDIAN/);   }
if (!defined($big_endian))
	    {	$big_endian=(unpack('L',pack('N',1))==1);   }	# big-endian iff native order == network order

#$human=1;
if ($human) {	# useful for visual code auditing...
	($A,$B,$C,$D,$E,$T)   = ("A","B","C","D","E","T");
	($h0,$h1,$h2,$h3,$h4) = ("h0","h1","h2","h3","h4");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"K_00_19","K_20_39","K_40_59","K_60_79"	);
	@X= (	"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7",
		"X8", "X9","X10","X11","X12","X13","X14","X15"	);
}
else {
	($A,$B,$C,$D,$E,$T)   = ("loc0","loc1","loc2","loc3","loc4","loc5");
	($h0,$h1,$h2,$h3,$h4) = ("loc6","loc7","loc8","loc9","loc10");
	($K_00_19, $K_20_39, $K_40_59, $K_60_79) =
	    (	"r14", "r15", "loc11", "loc12"	);
	@X= (	"r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
		"r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"	);
}

sub BODY_00_15 {
local	*code=shift;
local	($i,$a,$b,$c,$d,$e,$f)=@_;

$code.=<<___ if ($i==0);
{ .mmi;	ld1	$X[$i&0xf]=[inp],2	    // MSB
	ld1	tmp2=[tmp3],2		};;
{ .mmi;	ld1	tmp0=[inp],2
	ld1	tmp4=[tmp3],2		    // LSB
	dep	$X[$i&0xf]=$X[$i&0xf],tmp2,8,8	};;
___
if ($i<15) {
	$code.=<<___;
{ .mmi;	ld1	$X[($i+1)&0xf]=[inp],2	    // +1
	dep	tmp1=tmp0,tmp4,8,8	};;
{ .mmi;	ld1	tmp2=[tmp3],2		    // +1
	and	tmp4=$c,$b
	dep	$X[$i&0xf]=$X[$i&0xf],tmp1,16,16	} //;;
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp0=$e,$K_00_19
	dep.z	tmp5=$a,5,27		};; // a<<5
{ .mmi;	or	tmp4=tmp4,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp0,$X[$i&0xf]	    // f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5		};; // a>>27
{ .mmi;	ld1	tmp0=[inp],2		    // +1
	add	$f=$f,tmp4		    // f+=F_00_19(b,c,d)
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi;	ld1	tmp4=[tmp3],2		    // +1
	or	tmp5=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp5		    // f+=ROTATE(a,5)
	dep	$X[($i+1)&0xf]=$X[($i+1)&0xf],tmp2,8,8	// +1
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44	} //;;

___
	}
else	{
	$code.=<<___;
{ .mii;	and	tmp3=$c,$b
	dep	tmp1=tmp0,tmp4,8,8;;
	dep	$X[$i&0xf]=$X[$i&0xf],tmp1,16,16	} //;;
{ .mmi;	andcm	tmp1=$d,$b
	add	tmp0=$e,$K_00_19
	dep.z	tmp5=$a,5,27		};; // a<<5
{ .mmi;	or	tmp4=tmp3,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
	add	$f=tmp0,$X[$i&0xf]	    // f=xi+e+K_00_19
	extr.u	tmp1=$a,27,5		}   // a>>27
{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf] // +1
	nop.i	0			};;
{ .mmi;	add	$f=$f,tmp4		    // f+=F_00_19(b,c,d)
	xor	tmp2=tmp2,tmp3		    // +1
	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
{ .mmi; or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
	mux2	tmp6=$a,0x44		};; // see b in next iteration
{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
	mux2	$X[$i&0xf]=$X[$i&0xf],0x44  };;

___
	}
}
114
115sub BODY_16_19 {
116local	*code=shift;
117local	($i,$a,$b,$c,$d,$e,$f)=@_;
118
119$code.=<<___;
120{ .mmi;	mov	$X[$i&0xf]=$f		    // Xupdate
121	and	tmp0=$c,$b
122	dep.z	tmp5=$a,5,27		}   // a<<5
123{ .mmi;	andcm	tmp1=$d,$b
124	add	tmp4=$e,$K_00_19	};;
125{ .mmi;	or	tmp0=tmp0,tmp1		    // F_00_19(b,c,d)=(b&c)|(~b&d)
126	add	$f=$f,tmp4		    // f+=e+K_00_19
127	extr.u	tmp1=$a,27,5		}   // a>>27
128{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
129	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
130	nop.i	0			};;
131{ .mmi;	add	$f=$f,tmp0		    // f+=F_00_19(b,c,d)
132	xor	tmp2=tmp2,tmp3		    // +1
133	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
134{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
135	mux2	tmp6=$a,0x44		};; // see b in next iteration
136{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
137	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
138	nop.i	0			};;
139
140___
141}
142
143sub BODY_20_39 {
144local	*code=shift;
145local	($i,$a,$b,$c,$d,$e,$f,$Konst)=@_;
146	$Konst = $K_20_39 if (!defined($Konst));
147
148if ($i<79) {
149$code.=<<___;
150{ .mib;	mov	$X[$i&0xf]=$f		    // Xupdate
151	dep.z	tmp5=$a,5,27		}   // a<<5
152{ .mib;	xor	tmp0=$c,$b
153	add	tmp4=$e,$Konst		};;
154{ .mmi;	xor	tmp0=tmp0,$d		    // F_20_39(b,c,d)=b^c^d
155	add	$f=$f,tmp4		    // f+=e+K_20_39
156	extr.u	tmp1=$a,27,5		}   // a>>27
157{ .mmi;	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
158	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
159	nop.i	0			};;
160{ .mmi;	add	$f=$f,tmp0		    // f+=F_20_39(b,c,d)
161	xor	tmp2=tmp2,tmp3		    // +1
162	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
163{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
164	mux2	tmp6=$a,0x44		};; // see b in next iteration
165{ .mii;	add	$f=$f,tmp1		    // f+=ROTATE(a,5)
166	shrp	$e=tmp2,tmp2,31		    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
167	nop.i	0			};;
168
169___
170}
171else {
172$code.=<<___;
173{ .mib;	mov	$X[$i&0xf]=$f		    // Xupdate
174	dep.z	tmp5=$a,5,27		}   // a<<5
175{ .mib;	xor	tmp0=$c,$b
176	add	tmp4=$e,$Konst		};;
177{ .mib;	xor	tmp0=tmp0,$d		    // F_20_39(b,c,d)=b^c^d
178	extr.u	tmp1=$a,27,5		}   // a>>27
179{ .mib;	add	$f=$f,tmp4		    // f+=e+K_20_39
180	add	$h1=$h1,$a		};; // wrap up
181{ .mmi;	add	$f=$f,tmp0		    // f+=F_20_39(b,c,d)
182	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30) ;;?
183{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
184	add	$h3=$h3,$c		};; // wrap up
185{ .mib;	add	tmp3=1,inp		    // used in unaligned codepath
186	add	$f=$f,tmp1		}   // f+=ROTATE(a,5)
187{ .mib;	add	$h2=$h2,$b		    // wrap up
188	add	$h4=$h4,$d		};; // wrap up
189
190___
191}
192}
193
194sub BODY_40_59 {
195local	*code=shift;
196local	($i,$a,$b,$c,$d,$e,$f)=@_;
197
198$code.=<<___;
199{ .mmi;	mov	$X[$i&0xf]=$f		    // Xupdate
200	and	tmp0=$c,$b
201	dep.z	tmp5=$a,5,27		}   // a<<5
202{ .mmi;	and	tmp1=$d,$b
203	add	tmp4=$e,$K_40_59	};;
204{ .mmi;	or	tmp0=tmp0,tmp1		    // (b&c)|(b&d)
205	add	$f=$f,tmp4		    // f+=e+K_40_59
206	extr.u	tmp1=$a,27,5		}   // a>>27
207{ .mmi;	and	tmp4=$c,$d
208	xor	tmp2=$X[($i+0+1)&0xf],$X[($i+2+1)&0xf]	// +1
209	xor	tmp3=$X[($i+8+1)&0xf],$X[($i+13+1)&0xf]	// +1
210	};;
211{ .mmi;	or	tmp1=tmp1,tmp5		    // ROTATE(a,5)
212	xor	tmp2=tmp2,tmp3		    // +1
213	shrp	$b=tmp6,tmp6,2		}   // b=ROTATE(b,30)
214{ .mmi;	or	tmp0=tmp0,tmp4		    // F_40_59(b,c,d)=(b&c)|(b&d)|(c&d)
215	mux2	tmp6=$a,0x44		};; // see b in next iteration
216{ .mii;	add	$f=$f,tmp0		    // f+=F_40_59(b,c,d)
217	shrp	$e=tmp2,tmp2,31;;	    // f+1=ROTATE(x[0]^x[2]^x[8]^x[13],1)
218	add	$f=$f,tmp1		};; // f+=ROTATE(a,5)
219
220___
221}
222sub BODY_60_79	{ &BODY_20_39(@_,$K_60_79); }
223
224$code.=<<___;
225.text
226
227tmp0=r8;
228tmp1=r9;
229tmp2=r10;
230tmp3=r11;
231ctx=r32;	// in0
232inp=r33;	// in1
233
234// void sha1_block_data_order(SHA_CTX *c,const void *p,size_t num);
235.global	sha1_block_data_order#
236.proc	sha1_block_data_order#
237.align	32
238sha1_block_data_order:
239	.prologue
240{ .mmi;	alloc	tmp1=ar.pfs,3,15,0,0
241	$ADDP	tmp0=4,ctx
242	.save	ar.lc,r3
243	mov	r3=ar.lc		}
244{ .mmi;	$ADDP	ctx=0,ctx
245	$ADDP	inp=0,inp
246	mov	r2=pr			};;
247tmp4=in2;
248tmp5=loc13;
249tmp6=loc14;
250	.body
251{ .mlx;	ld4	$h0=[ctx],8
252	movl	$K_00_19=0x5a827999	}
253{ .mlx;	ld4	$h1=[tmp0],8
254	movl	$K_20_39=0x6ed9eba1	};;
255{ .mlx;	ld4	$h2=[ctx],8
256	movl	$K_40_59=0x8f1bbcdc	}
257{ .mlx;	ld4	$h3=[tmp0]
258	movl	$K_60_79=0xca62c1d6	};;
259{ .mmi;	ld4	$h4=[ctx],-16
260	add	in2=-1,in2		    // adjust num for ar.lc
261	mov	ar.ec=1			};;
262{ .mmi;	nop.m	0
263	add	tmp3=1,inp
264	mov	ar.lc=in2		};; // brp.loop.imp: too far
265
266.Ldtop:
267{ .mmi;	mov	$A=$h0
268	mov	$B=$h1
269	mux2	tmp6=$h1,0x44		}
270{ .mmi;	mov	$C=$h2
271	mov	$D=$h3
272	mov	$E=$h4			};;
273
274___
275
276{ my $i,@V=($A,$B,$C,$D,$E,$T);
277
278	for($i=0;$i<16;$i++)	{ &BODY_00_15(\$code,$i,@V); unshift(@V,pop(@V)); }
279	for(;$i<20;$i++)	{ &BODY_16_19(\$code,$i,@V); unshift(@V,pop(@V)); }
280	for(;$i<40;$i++)	{ &BODY_20_39(\$code,$i,@V); unshift(@V,pop(@V)); }
281	for(;$i<60;$i++)	{ &BODY_40_59(\$code,$i,@V); unshift(@V,pop(@V)); }
282	for(;$i<80;$i++)	{ &BODY_60_79(\$code,$i,@V); unshift(@V,pop(@V)); }
283
284	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
285}
286
287$code.=<<___;
288{ .mmb;	add	$h0=$h0,$E
289	nop.m	0
290	br.ctop.dptk.many	.Ldtop	};;
291.Ldend:
292{ .mmi;	add	tmp0=4,ctx
293	mov	ar.lc=r3		};;
294{ .mmi;	st4	[ctx]=$h0,8
295	st4	[tmp0]=$h1,8		};;
296{ .mmi;	st4	[ctx]=$h2,8
297	st4	[tmp0]=$h3		};;
298{ .mib;	st4	[ctx]=$h4,-16
299	mov	pr=r2,0x1ffff
300	br.ret.sptk.many	b0	};;
301.endp	sha1_block_data_order#
302stringz	"SHA1 block transform for IA64, CRYPTOGAMS by <appro\@openssl.org>"
303___
304
305$output=shift and open STDOUT,">$output";
306print $code;
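
# Typical use (a usage sketch; the output file name is the caller's
# choice, passed as the first argument per the open() above):
#
#	perl sha1-ia64.pl sha1-ia64.s	# then assemble sha1-ia64.s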