1#!/usr/bin/perl -w
2#
3# MD5 optimized for AMD64.
4#
5# Author: Marc Bevand <bevand_m (at) epita.fr>
6# Licence: I hereby disclaim the copyright on this code and place it
7# in the public domain.
8#
9
use strict;

# Accumulates the generated assembly text; printed in one shot at the end.
my $code;
13
# round1_step() appends to $code the assembly for one round-1 step:
#   dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
# interleaved with the loads for the following step:
#   %r10d = X[k_next]
#   %r11d = z' (copy of z for the next step)
# Original author measured ~5.71 clocks per step (9 instructions, 1.58 IPC).
#
# $pos is -1 for the first step of the round (emit the priming loads of
# %r10d/%r11d first), 0 for middle steps, 1 for the last step.
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # Fold the hex constant into a 32-bit signed decimal so it is usable
    # directly as the displacement of the lea below.
    $T_i = unpack("l", pack("l", hex($T_i)));
    if ($pos == -1) {
        # First step of round 1: prime the lookahead registers.
        $code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n";
        $code .= " mov	%edx,		%r11d		/* (NEXT STEP) z' = %edx */\n";
    }
    $code .= <<EOF;
	xor	$y,		%r11d		/* y ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$x,		%r11d		/* x & ... */
	xor	$z,		%r11d		/* z ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$y,		%r11d		/* (NEXT STEP) z' = $y */
	add	$x,		$dst		/* dst += x */
EOF
}
37
# round2_step() appends to $code the assembly for one round-2 step:
#   dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
# interleaved with the loads for the following step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Original author measured ~6.22 clocks per step (9 instructions, 1.45 IPC).
#
# $pos is -1 for the first step of the round (emit the priming loads of
# %r10d/%r11d first), 0 for middle steps, 1 for the last step.
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # Fold the hex constant into a 32-bit signed decimal so it is usable
    # directly as the displacement of the lea below.
    $T_i = unpack("l", pack("l", hex($T_i)));
    if ($pos == -1) {
        # First step of round 2: prime the lookahead registers.
        $code .= " mov	1*4(%rsi),	%r10d		/* (NEXT STEP) X[1] */\n";
        $code .= " mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n";
    }
    $code .= <<EOF;
	xor	$x,		%r11d		/* x ^ ... */
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	and	$z,		%r11d		/* z & ... */
	xor	$y,		%r11d		/* y ^ ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	add	$x,		$dst		/* dst += x */
EOF
}
61
# round3_step() appends to $code the assembly for one round-3 step:
#   dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
# interleaved with the loads for the following step:
#   %r10d = X[k_next]
#   %r11d = y' (copy of y for the next step)
# Original author measured ~4.26 clocks per step (8 instructions, 1.88 IPC).
#
# $pos is -1 for the first step of the round (emit the priming loads of
# %r10d/%r11d first), 0 for middle steps, 1 for the last step.
sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # Fold the hex constant into a 32-bit signed decimal so it is usable
    # directly as the displacement of the lea below.
    $T_i = unpack("l", pack("l", hex($T_i)));
    if ($pos == -1) {
        # First step of round 3: prime the lookahead registers.
        $code .= " mov	5*4(%rsi),	%r10d		/* (NEXT STEP) X[5] */\n";
        $code .= " mov	%ecx,		%r11d		/* (NEXT STEP) y' = %ecx */\n";
    }
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	xor	$z,		%r11d		/* z ^ ... */
	xor	$x,		%r11d		/* x ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	rol	\$$s,		$dst		/* dst <<< s */
	mov	$x,		%r11d		/* (NEXT STEP) y' = $x */
	add	$x,		$dst		/* dst += x */
EOF
}
84
# round4_step() appends to $code the assembly for one round-4 step:
#   dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
# interleaved with the loads for the following step:
#   %r10d = X[k_next]
#   %r11d = not z' (complement of z for the next step, built from a
#           full-ones mask xor'ed with z)
# Original author measured ~5.27 clocks per step (9 instructions, 1.71 IPC).
#
# $pos is -1 for the first step of the round (emit the priming loads of
# %r10d/%r11d first), 0 for middle steps, 1 for the last step.
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;
    # Fold the hex constant into a 32-bit signed decimal so it is usable
    # directly as the displacement of the lea below.
    $T_i = unpack("l", pack("l", hex($T_i)));
    if ($pos == -1) {
        # First step of round 4: prime the lookahead registers.
        $code .= " mov	0*4(%rsi),	%r10d		/* (NEXT STEP) X[0] */\n";
        $code .= " mov	\$0xffffffff,	%r11d\n";
        $code .= " xor	%edx,		%r11d		/* (NEXT STEP) not z' = not %edx*/\n";
    }
    $code .= <<EOF;
	lea	$T_i($dst,%r10d),$dst		/* Const + dst + ... */
	or	$x,		%r11d		/* x | ... */
	xor	$y,		%r11d		/* y ^ ... */
	add	%r11d,		$dst		/* dst += ... */
	mov	$k_next*4(%rsi),%r10d		/* (NEXT STEP) X[$k_next] */
	mov	\$0xffffffff,	%r11d
	rol	\$$s,		$dst		/* dst <<< s */
	xor	$y,		%r11d		/* (NEXT STEP) not z' = not $y */
	add	$x,		$dst		/* dst += x */
EOF
}
110
# Pipe everything we print to STDOUT through the perlasm translator, which
# writes the final assembly to $output.  Quote $^X in case the perl binary
# lives in a path containing spaces, and fail loudly if the pipe cannot be
# set up (the original silently continued, producing no output).
my $output = shift;
open STDOUT, "| \"$^X\" ../perlasm/x86_64-xlate.pl $output"
    or die "can't call ../perlasm/x86_64-xlate.pl: $!";
113
# Emit the function skeleton: prologue (save callee-saved registers), load
# of the MD5 state A/B/C/D from the context, and the top of the per-block
# loop.  The '#' comments inside the heredoc go into the generated assembly.
$code .= <<EOF;
.text
.align 16

.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
	push	%rbp
	push	%rbx
	push	%r14
	push	%r15

	# rdi = arg #1 (ctx, MD5_CTX pointer)
	# rsi = arg #2 (ptr, data pointer)
	# rdx = arg #3 (nbr, number of 16-word blocks to process)
	mov	%rdi,		%rbp	# rbp = ctx
	shl	\$6,		%rdx	# rdx = nbr in bytes
	lea	(%rsi,%rdx),	%rdi	# rdi = end
	mov	0*4(%rbp),	%eax	# eax = ctx->A
	mov	1*4(%rbp),	%ebx	# ebx = ctx->B
	mov	2*4(%rbp),	%ecx	# ecx = ctx->C
	mov	3*4(%rbp),	%edx	# edx = ctx->D
	# end is 'rdi'
	# ptr is 'rsi'
	# A is 'eax'
	# B is 'ebx'
	# C is 'ecx'
	# D is 'edx'

	cmp	%rdi,		%rsi		# cmp end with ptr
	je	.Lend				# jmp if ptr == end

	# BEGIN of loop over 16-word blocks
.Lloop:	# save old values of A, B, C, D
	mov	%eax,		%r8d
	mov	%ebx,		%r9d
	mov	%ecx,		%r14d
	mov	%edx,		%r15d
EOF
# The 64 MD5 steps.  Each call passes (pos, dst, x, y, z, k_next, T_i, s);
# dst rotates through A, D, C, B.  k_next is the message word index loaded
# for the FOLLOWING step, which is why each round ends with k_next = the
# first index of the next round ('1', '6', '8', '7' at the -1 calls, '0'
# wrapping at the pos==1 calls).

# Round 1: message words X[0..15] in order, shifts 7/12/17/22.
round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
round1_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x49b40821','22');

# Round 2: message words follow the (5i+1) mod 16 pattern (1,6,11,0,...),
# shifts 5/9/14/20.
round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
round2_step( 1,'%ebx','%ecx','%edx','%eax', '0','0x8d2a4c8a','20');

# Round 3: message words follow the (3i+5) mod 16 pattern (5,8,11,14,...),
# shifts 4/11/16/23.
round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');

# Round 4: message words follow the (7i) mod 16 pattern (0,7,14,5,...),
# shifts 6/10/15/21.
round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Emit the function epilogue: fold the saved A/B/C/D back into the state,
# advance the data pointer, loop until 'end', store the state back into the
# context and restore callee-saved registers.
$code .= <<EOF;
	# add old values of A, B, C, D
	add	%r8d,	%eax
	add	%r9d,	%ebx
	add	%r14d,	%ecx
	add	%r15d,	%edx

	# loop control
	add	\$64,		%rsi		# ptr += 64
	cmp	%rdi,		%rsi		# cmp end with ptr
	jb	.Lloop				# jmp if ptr < end
	# END of loop over 16-word blocks

.Lend:
	mov	%eax,		0*4(%rbp)	# ctx->A = A
	mov	%ebx,		1*4(%rbp)	# ctx->B = B
	mov	%ecx,		2*4(%rbp)	# ctx->C = C
	mov	%edx,		3*4(%rbp)	# ctx->D = D

	pop	%r15
	pop	%r14
	pop	%rbx
	pop	%rbp
	ret
.size md5_block_asm_data_order,.-md5_block_asm_data_order
EOF

print $code;

# STDOUT is a pipe into the perlasm translator: buffered write errors and a
# failing child only surface at close, so an unchecked close could silently
# drop part of the generated assembly.
close STDOUT or die "error closing STDOUT: $!";
250