#! /usr/bin/env perl
# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# March, June 2010
#
# The module implements "4-bit" GCM GHASH function and underlying
# single multiplication operation in GF(2^128). "4-bit" means that
# it uses 256 bytes per-key table [+128 bytes shared table]. GHASH
# function features so-called "528B" variant utilizing additional
# 256+16 bytes of per-key storage [+512 bytes shared table].
# Performance results are for this streamed GHASH subroutine and are
# expressed in cycles per processed byte, less is better:
#
#		gcc 3.4.x(*)	assembler
#
# P4		28.6		14.0		+100%
# Opteron	19.3		7.7		+150%
# Core2		17.8		8.1(**)		+120%
# Atom		31.6		16.8		+88%
# VIA Nano	21.8		10.1		+115%
#
# (*)	comparison is not completely fair, because C results are
#	for vanilla "256B" implementation, while assembler results
#	are for "528B";-)
# (**)	it's a mystery [to me] why the Core2 result is not the same
#	as for Opteron;

# May 2010
#
# Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
# See ghash-x86.pl for background information and details about coding
# techniques.
#
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.

# December 2012
#
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter: ghash-x86.pl discusses that it makes lesser sense to
# increase the aggregate factor. Then why increase it here? The
# critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down the aggregated multiplication operations, triplets of
# independent pclmulqdq's. As the issue rate for pclmulqdq is limited,
# it makes lesser sense to aggregate more multiplications than it
# takes to perform the remaining non-multiplication operations. 2x is
# the near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement coefficient), but not for Bulldozer. The latter
# is because its logical SIMD operations are twice as slow as Intel's,
# so the critical path is longer. A CPU with a higher pclmulqdq issue
# rate would also benefit from a higher aggregate factor...
#
# Westmere	1.78(+13%)
# Sandy Bridge	1.80(+8%)
# Ivy Bridge	1.80(+7%)
# Haswell	0.55(+93%) (if system doesn't support AVX)
# Broadwell	0.45(+110%)(if system doesn't support AVX)
# Skylake	0.44(+110%)(if system doesn't support AVX)
# Bulldozer	1.49(+27%)
# Silvermont	2.88(+13%)
# Knights L	2.12(-)    (if system doesn't support AVX)
# Goldmont	1.08(+24%)

# March 2013
#
# ... 8x aggregate factor AVX code path is using reduction algorithm
# suggested by Shay Gueron[1]. Even though contemporary AVX-capable
# CPUs such as Sandy and Ivy Bridge can execute it, the code performs
# sub-optimally in comparison to the above-mentioned version. But
# thanks to Ilya Albrekht and Max Locktyukhin of Intel Corp. we know
# that it performs at 0.41 cycles per byte on a Haswell processor, at
# 0.29 on Broadwell, and at 0.36 on Skylake.
#
# Knights Landing achieves 1.09 cpb.
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest

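# For reference, every code path below computes the same primitive:
# multiplication in GF(2^128) with GCM's bit-reflected conventions,
# reduced modulo x^128+x^7+x^2+x+1. A plain bit-level model of that
# operation, in the spirit of the specification, might look as follows.
# This is an illustration only, never called by this generator; the name
# _gf128_mul_ref is made up here, and it assumes a perl built with
# 64-bit integers, with 128-bit values held as big-endian 64-bit halves:
#
#	sub _gf128_mul_ref {
#	my ($Xhi,$Xlo,$Vhi,$Vlo) = @_;		# returns Z = X*H
#	my ($Zhi,$Zlo) = (0,0);
#	    for my $i (0..127) {
#		my $bit = $i<64 ? ($Xhi>>(63-$i))&1 : ($Xlo>>(127-$i))&1;
#		($Zhi,$Zlo) = ($Zhi^$Vhi,$Zlo^$Vlo)	if ($bit);
#		my $carry = $Vlo&1;	# shift V right one bit and reduce
#		$Vlo = ($Vlo>>1)|(($Vhi&1)<<63);
#		$Vhi >>= 1;
#		$Vhi ^= 0xe100000000000000	if ($carry);
#	    }
#	    ($Zhi,$Zlo);
#	}
#
# The 4-bit tables, the PCLMULQDQ path and the AVX path below are
# successively faster ways of computing this same function.
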
$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# See the notes about |$avx| in aesni-gcm-x86_64.pl; otherwise tags will be
# computed incorrectly.
#
# In upstream, this is controlled by shelling out to the compiler to check
# versions, but BoringSSL is intended to be used with pre-generated perlasm
# output, so this isn't useful anyway.
$avx = 1;

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

$do4xaggr=1;

# common register layout
$nlo="%rax";
$nhi="%rbx";
$Zlo="%r8";
$Zhi="%r9";
$tmp="%r10";
$rem_4bit = "%r11";

$Xi="%rdi";
$Htbl="%rsi";

# per-function register layout
$cnt="%rcx";
$rem="%rdx";

sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/	or
			$r =~ s/%[er]([sd]i)/%\1l/	or
			$r =~ s/%[er](bp)/%\1l/		or
			$r =~ s/%(r[0-9]+)[d]?/%\1b/;   $r; }

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

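# As an example of the two helpers above: a call like
#
#	&mov	($Zlo,"8($Xi)");
#
# appends "\tmov\t8($Xi),$Zlo\n" to $code, i.e. the last Perl argument
# becomes the first (AT&T-order) operand, and an argument that is
# purely numeric gets a "$" prefix automatically. &LB() rewrites a
# register name to its low-byte form, e.g. "%rax" -> "%al" and
# "%r12" -> "%r12b".
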
{ my $N;
  sub loop() {
  my $inp = shift;

	$N++;
$code.=<<___;
	xor	$nlo,$nlo
	xor	$nhi,$nhi
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`
	mov	\$14,$cnt
	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	mov	$Zlo,$rem
	jmp	.Loop$N

.align	16
.Loop$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	mov	($inp,$cnt),`&LB("$nlo")`
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	shl	\$4,`&LB("$nlo")`
	xor	$tmp,$Zlo
	dec	$cnt
	js	.Lbreak$N

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo
	jmp	.Loop$N

.align	16
.Lbreak$N:
	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nlo),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	mov	$Zlo,$rem
	xor	$tmp,$Zlo

	shr	\$4,$Zlo
	and	\$0xf,$rem
	mov	$Zhi,$tmp
	shr	\$4,$Zhi
	xor	8($Htbl,$nhi),$Zlo
	shl	\$60,$tmp
	xor	($Htbl,$nhi),$Zhi
	xor	$tmp,$Zlo
	xor	($rem_4bit,$rem,8),$Zhi

	bswap	$Zlo
	bswap	$Zhi
___
}}

$code=<<___;
.text
.extern	OPENSSL_ia32cap_P

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
.align	16
gcm_gmult_4bit:
	push	%rbx
	push	%rbp		# %rbp and others are pushed exclusively in
	push	%r12		# order to reuse Win64 exception handler...
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lgmult_prologue:

	movzb	15($Xi),$Zlo
	lea	.Lrem_4bit(%rip),$rem_4bit
___
	&loop	($Xi);
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
	mov	-8(%rsi),%rbx
	lea	(%rsi),%rsp
.Lgmult_epilogue:
	ret
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___

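# A note on the code above: gcm_gmult_4bit consumes Xi one byte at a
# time, from byte 15 down to byte 0, two nibbles per byte. The per-key
# table at $Htbl holds 16 entries of 16 bytes, indexed by the nibble
# value times 16, with the high half of each entry at offset 0 and the
# low half at offset 8; .Lrem_4bit supplies the reduction of the four
# bits that each "shr \$4" pushes out of the low end of Z.
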
# per-function register layout
$inp="%rdx";
$len="%rcx";
$rem_8bit=$rem_4bit;

$code.=<<___;
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
.align	16
gcm_ghash_4bit:
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	sub	\$280,%rsp
.Lghash_prologue:
	mov	$inp,%r14		# reassign a couple of args
	mov	$len,%r15
___
{ my $inp="%r14";
  my $dat="%edx";
  my $len="%r15";
  my @nhi=("%ebx","%ecx");
  my @rem=("%r12","%r13");
  my $Hshr4="%rbp";

	&sub	($Htbl,-128);		# size optimization
	&lea	($Hshr4,"16+128(%rsp)");
	{ my @lo =($nlo,$nhi);
	  my @hi =($Zlo,$Zhi);

	  &xor	($dat,$dat);
	  for ($i=0,$j=-2;$i<18;$i++,$j++) {
	    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
	    &or		($lo[0],$tmp)			if ($i>1);
	    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
	    &shr	($lo[1],4)			if ($i>0 && $i<17);
	    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
	    &shr	($hi[1],4)			if ($i>0 && $i<17);
	    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
	    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
	    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
	    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
	    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
	    &shl	($tmp,60)			if ($i>0 && $i<17);

	    push	(@lo,shift(@lo));
	    push	(@hi,shift(@hi));
	  }
	}
	&add	($Htbl,-128);
	&mov	($Zlo,"8($Xi)");
	&mov	($Zhi,"0($Xi)");
	&add	($len,$inp);		# pointer to the end of data
	&lea	($rem_8bit,".Lrem_8bit(%rip)");
	&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
	&xor	($Zhi,"($inp)");
	&mov	("%rdx","8($inp)");
	&lea	($inp,"16($inp)");
	&xor	("%rdx",$Zlo);
	&mov	("($Xi)",$Zhi);
	&mov	("8($Xi)","%rdx");
	&shr	("%rdx",32);

	&xor	($nlo,$nlo);
	&rol	($dat,8);
	&mov	(&LB($nlo),&LB($dat));
	&movz	($nhi[0],&LB($dat));
	&shl	(&LB($nlo),4);
	&shr	($nhi[0],4);

	for ($j=11,$i=0;$i<15;$i++) {
	    &rol	($dat,8);
	    &xor	($Zlo,"8($Htbl,$nlo)")		if ($i>0);
	    &xor	($Zhi,"($Htbl,$nlo)")		if ($i>0);
	    &mov	($Zlo,"8($Htbl,$nlo)")		if ($i==0);
	    &mov	($Zhi,"($Htbl,$nlo)")		if ($i==0);

	    &mov	(&LB($nlo),&LB($dat));
	    &xor	($Zlo,$tmp)			if ($i>0);
	    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

	    &movz	($nhi[1],&LB($dat));
	    &shl	(&LB($nlo),4);
	    &movzb	($rem[0],"(%rsp,$nhi[0])");

	    &shr	($nhi[1],4)			if ($i<14);
	    &and	($nhi[1],0xf0)			if ($i==14);
	    &shl	($rem[1],48)			if ($i>0);
	    &xor	($rem[0],$Zlo);

	    &mov	($tmp,$Zhi);
	    &xor	($Zhi,$rem[1])			if ($i>0);
	    &shr	($Zlo,8);

	    &movz	($rem[0],&LB($rem[0]));
	    &mov	($dat,"$j($Xi)")		if (--$j%4==0);
	    &shr	($Zhi,8);

	    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
	    &shl	($tmp,56);
	    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

	    unshift	(@nhi,pop(@nhi));		# "rotate" registers
	    unshift	(@rem,pop(@rem));
	}
	&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
	&xor	($Zlo,"8($Htbl,$nlo)");
	&xor	($Zhi,"($Htbl,$nlo)");

	&shl	($rem[1],48);
	&xor	($Zlo,$tmp);

	&xor	($Zhi,$rem[1]);
	&movz	($rem[0],&LB($Zlo));
	&shr	($Zlo,4);

	&mov	($tmp,$Zhi);
	&shl	(&LB($rem[0]),4);
	&shr	($Zhi,4);

	&xor	($Zlo,"8($Htbl,$nhi[0])");
	&movzw	($rem[0],"($rem_8bit,$rem[0],2)");
	&shl	($tmp,60);

	&xor	($Zhi,"($Htbl,$nhi[0])");
	&xor	($Zlo,$tmp);
	&shl	($rem[0],48);

	&bswap	($Zlo);
	&xor	($Zhi,$rem[0]);

	&bswap	($Zhi);
	&cmp	($inp,$len);
	&jb	(".Louter_loop");
}
$code.=<<___;
	mov	$Zlo,8($Xi)
	mov	$Zhi,($Xi)

	lea	280+48(%rsp),%rsi
	mov	-48(%rsi),%r15
	mov	-40(%rsi),%r14
	mov	-32(%rsi),%r13
	mov	-24(%rsi),%r12
	mov	-16(%rsi),%rbp
	mov	-8(%rsi),%rbx
	lea	0(%rsi),%rsp
.Lghash_epilogue:
	ret
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
___

######################################################################
# PCLMULQDQ version.

@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");

sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pshufd		\$0b01001110,$Hkey,$T2
	pxor		$Xi,$T1			#
	pxor		$Hkey,$T2
___
} else {
$code.=<<___;
	movdqa		$Xi,$Xhi		#
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1			#
___
}
$code.=<<___;
	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
	pxor		$Xi,$T1			#
	pxor		$Xhi,$T1		#

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
}

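# The three pclmulqdq's in clmul64x64_T2 implement one 128x128-bit
# carry-less multiplication by Karatsuba, with pshufd forming the
# (lo^hi) halves up front:
#
#	X*H = Xh*Hh*x^128 ^ Xl*Hl
#	    ^ (Xh*Hh ^ Xl*Hl ^ (Xh^Xl)*(Hh^Hl))*x^64
#
# and the trailing movdqa/psrldq/pslldq trio splits the middle 128-bit
# term across the x^64 boundary into $Xhi:$Xi.
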
sub reduction_alg9 {	# 17/11 times faster than Intel version
my ($Xhi,$Xi) = @_;

$code.=<<___;
	# 1st phase
	movdqa		$Xi,$T2			#
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#

	# 2nd phase
	movdqa		$Xi,$T2
	psrlq		\$1,$Xi
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
___
}

{ my ($Htbl,$Xip)=@_4args;
  my $HK="%xmm6";

$code.=<<___;
.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
.align	16
gcm_init_clmul:
.L_init_clmul:
___
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	movdqu		($Xip),$Hkey
	pshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	pshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	movdqa		$Hkey,$T1
	psllq		\$1,$Hkey
	pxor		$T3,$T3			#
	psrlq		\$63,$T1
	pcmpgtd		$T2,$T3			# broadcast carry bit
	pslldq		\$8,$T1
	por		$T1,$Hkey		# H<<=1

	# magic reduction
	pand		.L0x1c2_polynomial(%rip),$T3
	pxor		$T3,$Hkey		# if(carry) H^=0x1c2_polynomial

	# calculate H^2
	pshufd		\$0b01001110,$Hkey,$HK
	movdqa		$Hkey,$Xi
	pxor		$Hkey,$HK
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$Hkey,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$Hkey,$T1		# Karatsuba pre-processing
	movdqu		$Hkey,0x00($Htbl)	# save H
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x10($Htbl)		# save H^2
	palignr		\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu		$T2,0x20($Htbl)		# save Karatsuba "salt"
___
if ($do4xaggr) {
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^3
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	movdqa		$Xi,$T3
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^4
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	pshufd		\$0b01001110,$T3,$T1
	pshufd		\$0b01001110,$Xi,$T2
	pxor		$T3,$T1			# Karatsuba pre-processing
	movdqu		$T3,0x30($Htbl)		# save H^3
	pxor		$Xi,$T2			# Karatsuba pre-processing
	movdqu		$Xi,0x40($Htbl)		# save H^4
	palignr		\$8,$T1,$T2		# low part is H^3.lo^H^3.hi...
	movdqu		$T2,0x50($Htbl)		# save Karatsuba "salt"
___
}
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_clmul:
___
$code.=<<___;
	ret
.size	gcm_init_clmul,.-gcm_init_clmul
___
}

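# Two observations on the above. First, H is kept with the "<<1 twist"
# applied, and .L0x1c2_polynomial matches that convention: 0x1c2 is the
# GCM reduction constant 0xE1 shifted left by one bit. Second, the
# psllq 5/1/57 chain in reduction_alg9's 1st phase accumulates
# X*(x^57 ^ x^62 ^ x^63): with X the incoming value, T1 = X ^ X<<5,
# then Xi = X<<6 ^ T1, then Xi <<= 57. These are the same three terms
# that reduction_avx further below computes with three direct vpsllq's.
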
{ my ($Xip,$Htbl)=@_4args;

$code.=<<___;
.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
.align	16
gcm_gmult_clmul:
.L_gmult_clmul:
	movdqu		($Xip),$Xi
	movdqa		.Lbswap_mask(%rip),$T3
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$T2
	pshufb		$T3,$Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. The special thing about it is that
	# there is no dependency between the two multiplications...
	mov		\$`0xE1<<1`,%eax
	mov		\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	mov		\$0x07,%r11d
	movq		%rax,$T1
	movq		%r10,$T2
	movq		%r11,$T3		# borrow $T3
	pand		$Xi,$T3
	pshufb		$T3,$T2			# ($Xi&7)·0xE0
	movq		%rax,$T3
	pclmulqdq	\$0x00,$Xi,$T1		# ·(0xE1<<1)
	pxor		$Xi,$T2
	pslldq		\$15,$T2
	paddd		$T2,$T2			# <<(64+56+1)
	pxor		$T2,$Xi
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa		.Lbswap_mask(%rip),$T3	# reload $T3
	psrldq		\$1,$T1
	pxor		$T1,$Xhi
	pslldq		\$7,$Xi
	pxor		$Xhi,$Xi
___
$code.=<<___;
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
	ret
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
___
}

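# Note the guard on the heredoc above: "0 || (&reduction_alg9(...)&&0)"
# always evaluates to false, so the experimental block is never
# emitted, but &reduction_alg9 still runs for its side effect of
# appending the regular reduction to $code.
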
{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
  my ($T1,$T2,$T3)=map("%xmm$_",(8..10));

$code.=<<___;
.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
.align	32
gcm_ghash_clmul:
.L_ghash_clmul:
___
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	movdqa		.Lbswap_mask(%rip),$T3

	movdqu		($Xip),$Xi
	movdqu		($Htbl),$Hkey
	movdqu		0x20($Htbl),$HK
	pshufb		$T3,$Xi

	sub		\$0x10,$len
	jz		.Lodd_tail

	movdqu		0x10($Htbl),$Hkey2
___
if ($do4xaggr) {
my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

$code.=<<___;
	leaq		OPENSSL_ia32cap_P(%rip),%rax
	mov		4(%rax),%eax
	cmp		\$0x30,$len
	jb		.Lskip4x

	and		\$`1<<26|1<<22`,%eax	# isolate MOVBE+XSAVE
	cmp		\$`1<<22`,%eax		# check for MOVBE without XSAVE
	je		.Lskip4x

	sub		\$0x30,$len
	mov		\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff
	movdqu		0x30($Htbl),$Hkey3
	movdqu		0x40($Htbl),$Hkey4

	#######
	# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	#
	movdqu		0x30($inp),$Xln
	movdqu		0x20($inp),$Xl
	pshufb		$T3,$Xln
	pshufb		$T3,$Xl
	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	pclmulqdq	\$0x10,$HK,$Xm
	xorps		$Xl,$Xln
	xorps		$Xh,$Xhn
	movups		0x50($Htbl),$HK
	xorps		$Xm,$Xmn

	movdqu		0x10($inp),$Xl
	movdqu		0($inp),$T1
	pshufb		$T3,$Xl
	pshufb		$T3,$T1
	movdqa		$Xl,$Xh
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$T1,$Xi
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	movdqa		$Xi,$Xhi
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm
	xorps		$Xl,$Xln
	xorps		$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jc	.Ltail4x

	jmp	.Lmod4_loop
.align	32
.Lmod4_loop:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	xorps		$Xm,$Xmn
	movdqu		0x30($inp),$Xl
	pshufb		$T3,$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	xorps		$Xln,$Xi
	movdqu		0x20($inp),$Xln
	movdqa		$Xl,$Xh
	pclmulqdq	\$0x10,$HK,$T1
	pshufd		\$0b01001110,$Xl,$Xm
	xorps		$Xhn,$Xhi
	pxor		$Xl,$Xm
	pshufb		$T3,$Xln
	movups		0x20($Htbl),$HK
	xorps		$Xmn,$T1
	pclmulqdq	\$0x00,$Hkey,$Xl
	pshufd		\$0b01001110,$Xln,$Xmn

	pxor		$Xi,$T1			# aggregated Karatsuba post-processing
	movdqa		$Xln,$Xhn
	pxor		$Xhi,$T1		#
	pxor		$Xln,$Xmn
	movdqa		$T1,$T2			#
	pclmulqdq	\$0x11,$Hkey,$Xh
	pslldq		\$8,$T1
	psrldq		\$8,$T2			#
	pxor		$T1,$Xi
	movdqa		.L7_mask(%rip),$T1
	pxor		$T2,$Xhi		#
	movq		%rax,$T2

	pand		$Xi,$T1			# 1st phase
	pshufb		$T1,$T2			#
	pxor		$Xi,$T2			#
	pclmulqdq	\$0x00,$HK,$Xm
	psllq		\$57,$T2		#
	movdqa		$T2,$T1			#
	pslldq		\$8,$T2
	pclmulqdq	\$0x00,$Hkey2,$Xln
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pxor		$T1,$Xhi		#
	movdqu		0($inp),$T1

	movdqa		$Xi,$T2			# 2nd phase
	psrlq		\$1,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhn
	xorps		$Xl,$Xln
	movdqu		0x10($inp),$Xl
	pshufb		$T3,$Xl
	pclmulqdq	\$0x10,$HK,$Xmn
	xorps		$Xh,$Xhn
	movups		0x50($Htbl),$HK
	pshufb		$T3,$T1
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi

	movdqa		$Xl,$Xh
	pxor		$Xm,$Xmn
	pshufd		\$0b01001110,$Xl,$Xm
	pxor		$T2,$Xi			#
	pxor		$T1,$Xhi
	pxor		$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	psrlq		\$1,$Xi			#
	pxor		$Xhi,$Xi		#
	movdqa		$Xi,$Xhi
	pclmulqdq	\$0x11,$Hkey3,$Xh
	xorps		$Xl,$Xln
	pshufd		\$0b01001110,$Xi,$T1
	pxor		$Xi,$T1

	pclmulqdq	\$0x00,$HK,$Xm
	xorps		$Xh,$Xhn

	lea	0x40($inp),$inp
	sub	\$0x40,$len
	jnc	.Lmod4_loop

.Ltail4x:
	pclmulqdq	\$0x00,$Hkey4,$Xi
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	pclmulqdq	\$0x10,$HK,$T1
	xorps		$Xm,$Xmn
	xorps		$Xln,$Xi
	xorps		$Xhn,$Xhi
	pxor		$Xi,$Xhi		# aggregated Karatsuba post-processing
	pxor		$Xmn,$T1

	pxor		$Xhi,$T1		#
	pxor		$Xi,$Xhi

	movdqa		$T1,$T2			#
	psrldq		\$8,$T1
	pslldq		\$8,$T2			#
	pxor		$T1,$Xhi
	pxor		$T2,$Xi			#
___
	&reduction_alg9($Xhi,$Xi);
$code.=<<___;
	add	\$0x40,$len
	jz	.Ldone
	movdqu	0x20($Htbl),$HK
	sub	\$0x10,$len
	jz	.Lodd_tail
.Lskip4x:
___
}
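# In .Lmod4_loop above, the two phases of the previous iteration's
# reduction are threaded between the pclmulqdq's for the next four
# blocks; this is the aggregated Karatsuba post-processing described in
# the December 2012 note, with one reduction amortized over four
# multiplications.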
$code.=<<___;
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
	movdqu		($inp),$T1	# Ii
	movdqu		16($inp),$Xln	# Ii+1
	pshufb		$T3,$T1
	pshufb		$T3,$Xln
	pxor		$T1,$Xi		# Ii+Xi

	movdqa		$Xln,$Xhn
	pshufd		\$0b01001110,$Xln,$Xmn
	pxor		$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn

	lea		32($inp),$inp		# i+=2
	nop
	sub		\$0x20,$len
	jbe		.Leven_tail
	nop
	jmp		.Lmod_loop

.align	32
.Lmod_loop:
	movdqa		$Xi,$Xhi
	movdqa		$Xmn,$T1
	pshufd		\$0b01001110,$Xi,$Xmn	#
	pxor		$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor		$Xhn,$Xhi
	movdqu		($inp),$T2		# Ii
	pxor		$Xi,$T1			# aggregated Karatsuba post-processing
	pshufb		$T3,$T2
	movdqu		16($inp),$Xln		# Ii+1

	pxor		$Xhi,$T1
	pxor		$T2,$Xhi		# "Ii+Xi", consume early
	pxor		$T1,$Xmn
	pshufb		$T3,$Xln
	movdqa		$Xmn,$T1		#
	psrldq		\$8,$T1
	pslldq		\$8,$Xmn		#
	pxor		$T1,$Xhi
	pxor		$Xmn,$Xi		#

	movdqa		$Xln,$Xhn		#

	movdqa		$Xi,$T2			# 1st phase
	movdqa		$Xi,$T1
	psllq		\$5,$Xi
	pxor		$Xi,$T1			#
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	psllq		\$1,$Xi
	pxor		$T1,$Xi			#
	psllq		\$57,$Xi		#
	movdqa		$Xi,$T1			#
	pslldq		\$8,$Xi
	psrldq		\$8,$T1			#
	pxor		$T2,$Xi
	pshufd		\$0b01001110,$Xhn,$Xmn
	pxor		$T1,$Xhi		#
	pxor		$Xhn,$Xmn		#

	movdqa		$Xi,$T2			# 2nd phase
	psrlq		\$1,$Xi
	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	pxor		$T2,$Xhi		#
	pxor		$Xi,$T2
	psrlq		\$5,$Xi
	pxor		$T2,$Xi			#
	lea		32($inp),$inp
	psrlq		\$1,$Xi			#
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	pxor		$Xhi,$Xi		#

	sub		\$0x20,$len
	ja		.Lmod_loop

.Leven_tail:
	movdqa		$Xi,$Xhi
	movdqa		$Xmn,$T1
	pshufd		\$0b01001110,$Xi,$Xmn	#
	pxor		$Xi,$Xmn		#

	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn

	pxor		$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	pxor		$Xhn,$Xhi
	pxor		$Xi,$T1
	pxor		$Xhi,$T1
	pxor		$T1,$Xmn
	movdqa		$Xmn,$T1		#
	psrldq		\$8,$T1
	pslldq		\$8,$Xmn		#
	pxor		$T1,$Xhi
	pxor		$Xmn,$Xi		#
___
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
	test	$len,$len
	jnz	.Ldone

.Lodd_tail:
	movdqu		($inp),$T1	# Ii
	pshufb		$T3,$T1
	pxor		$T1,$Xi		# Ii+Xi
___
	&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
	&reduction_alg9	($Xhi,$Xi);
$code.=<<___;
.Ldone:
	pshufb		$T3,$Xi
	movdqu		$Xi,($Xip)
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_clmul:
___
$code.=<<___;
	ret
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
___
}

$code.=<<___;
.globl	gcm_init_avx
.type	gcm_init_avx,\@abi-omnipotent
.align	32
gcm_init_avx:
___
if ($avx) {
my ($Htbl,$Xip)=@_4args;
my $HK="%xmm6";

$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
___
$code.=<<___;
	vzeroupper

	vmovdqu		($Xip),$Hkey
	vpshufd		\$0b01001110,$Hkey,$Hkey	# dword swap

	# <<1 twist
	vpshufd		\$0b11111111,$Hkey,$T2	# broadcast uppermost dword
	vpsrlq		\$63,$Hkey,$T1
	vpsllq		\$1,$Hkey,$Hkey
	vpxor		$T3,$T3,$T3		#
	vpcmpgtd	$T2,$T3,$T3		# broadcast carry bit
	vpslldq		\$8,$T1,$T1
	vpor		$T1,$Hkey,$Hkey		# H<<=1

	# magic reduction
	vpand		.L0x1c2_polynomial(%rip),$T3,$T3
	vpxor		$T3,$Hkey,$Hkey		# if(carry) H^=0x1c2_polynomial

	vpunpckhqdq	$Hkey,$Hkey,$HK
	vmovdqa		$Hkey,$Xi
	vpxor		$Hkey,$HK,$HK
	mov		\$4,%r10		# up to H^8
	jmp		.Linit_start_avx
___

sub clmul64x64_avx {
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpunpckhqdq	$Hkey,$Hkey,$T2
	vpxor		$Xi,$T1,$T1		#
	vpxor		$Hkey,$T2,$T2
___
} else {
$code.=<<___;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpxor		$Xi,$T1,$T1		#
___
}
$code.=<<___;
	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xhi	#######
	vpclmulqdq	\$0x00,$Hkey,$Xi,$Xi	#######
	vpclmulqdq	\$0x00,$HK,$T1,$T1	#######
	vpxor		$Xi,$Xhi,$T2		#
	vpxor		$T2,$T1,$T1		#

	vpslldq		\$8,$T1,$T2		#
	vpsrldq		\$8,$T1,$T1
	vpxor		$T2,$Xi,$Xi		#
	vpxor		$T1,$Xhi,$Xhi
___
}

sub reduction_avx {
my ($Xhi,$Xi) = @_;

$code.=<<___;
	vpsllq		\$57,$Xi,$T1		# 1st phase
	vpsllq		\$62,$Xi,$T2
	vpxor		$T1,$T2,$T2		#
	vpsllq		\$63,$Xi,$T1
	vpxor		$T1,$T2,$T2		#
	vpslldq		\$8,$T2,$T1		#
	vpsrldq		\$8,$T2,$T2
	vpxor		$T1,$Xi,$Xi		#
	vpxor		$T2,$Xhi,$Xhi

	vpsrlq		\$1,$Xi,$T2		# 2nd phase
	vpxor		$Xi,$Xhi,$Xhi
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$5,$T2,$T2
	vpxor		$T2,$Xi,$Xi		#
	vpsrlq		\$1,$Xi,$Xi		#
	vpxor		$Xhi,$Xi,$Xi		#
___
}

$code.=<<___;
.align	32
.Linit_loop_avx:
	vpalignr	\$8,$T1,$T2,$T3		# low part is H.lo^H.hi...
	vmovdqu		$T3,-0x10($Htbl)	# save Karatsuba "salt"
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^3,5,7
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
.Linit_start_avx:
	vmovdqa		$Xi,$T3
___
	&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^2,4,6,8
	&reduction_avx	($Xhi,$Xi);
$code.=<<___;
	vpshufd		\$0b01001110,$T3,$T1
	vpshufd		\$0b01001110,$Xi,$T2
	vpxor		$T3,$T1,$T1		# Karatsuba pre-processing
	vmovdqu		$T3,0x00($Htbl)		# save H^1,3,5,7
	vpxor		$Xi,$T2,$T2		# Karatsuba pre-processing
	vmovdqu		$Xi,0x10($Htbl)		# save H^2,4,6,8
	lea		0x30($Htbl),$Htbl
	sub		\$1,%r10
	jnz		.Linit_loop_avx

	vpalignr	\$8,$T2,$T1,$T3		# last "salt" is flipped
	vmovdqu		$T3,-0x10($Htbl)

	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	lea	0x18(%rsp),%rsp
.LSEH_end_gcm_init_avx:
___
$code.=<<___;
	ret
.size	gcm_init_avx,.-gcm_init_avx
___
} else {
$code.=<<___;
	jmp	.L_init_clmul
.size	gcm_init_avx,.-gcm_init_avx
___
}

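# The net table layout produced by gcm_init_avx is four 0x30-byte
# groups: an odd power H^1,3,5,7 at +0x00, the following even power
# H^2,4,6,8 at +0x10 and the Karatsuba "salt" (lo^hi halves) at +0x20
# of each group. gcm_ghash_avx below addresses the same layout as
# 0xNN-0x40($Htbl) after its "lea 0x40($Htbl)" size optimization.
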
$code.=<<___;
.globl	gcm_gmult_avx
.type	gcm_gmult_avx,\@abi-omnipotent
.align	32
gcm_gmult_avx:
	jmp	.L_gmult_clmul
.size	gcm_gmult_avx,.-gcm_gmult_avx
___

$code.=<<___;
.globl	gcm_ghash_avx
.type	gcm_ghash_avx,\@abi-omnipotent
.align	32
gcm_ghash_avx:
___
if ($avx) {
my ($Xip,$Htbl,$inp,$len)=@_4args;
my ($Xlo,$Xhi,$Xmi,
    $Zlo,$Zhi,$Zmi,
    $Hkey,$HK,$T1,$T2,
    $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));

$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
___
$code.=<<___;
	vzeroupper

	vmovdqu		($Xip),$Xi		# load $Xi
	lea		.L0x1c2_polynomial(%rip),%r10
	lea		0x40($Htbl),$Htbl	# size optimization
	vmovdqu		.Lbswap_mask(%rip),$bswap
	vpshufb		$bswap,$Xi,$Xi
	cmp		\$0x80,$len
	jb		.Lshort_avx
	sub		\$0x80,$len

	vmovdqu		0x70($inp),$Ii		# I[7]
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpshufb		$bswap,$Ii,$Ii
	vmovdqu		0x20-0x40($Htbl),$HK

	vpunpckhqdq	$Ii,$Ii,$T2
	vmovdqu		0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Ii,$T2,$T2
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu		0x50($inp),$Ii		# I[5]
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpxor		$Ii,$T2,$T2
	vmovdqu		0x40($inp),$Ij		# I[4]
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0x50-0x40($Htbl),$HK

	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vmovdqu		0x30($inp),$Ii		# I[3]
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Zhi,$Xhi,$Xhi
	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpxor		$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0x80-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		0x20($inp),$Ij		# I[2]
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpxor		$Xmi,$Zmi,$Zmi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi
	vpxor		$Ij,$T1,$T1

	vmovdqu		0x10($inp),$Ii		# I[1]
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Zhi,$Xhi,$Xhi
	vpshufb		$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpxor		$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu		0xb0-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		($inp),$Ij		# I[0]
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Xhi,$Zhi,$Zhi
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi

	lea		0x80($inp),$inp
	cmp		\$0x80,$len
	jb		.Ltail_avx

	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi
	sub		\$0x80,$len
	jmp		.Loop8x_avx

.align	32
.Loop8x_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu		0x70($inp),$Ii		# I[7]
	vpxor		$Xlo,$Zlo,$Zlo
	vpxor		$Ij,$T1,$T1
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xi
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xo
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Tred
	vmovdqu		0x20-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2

	vmovdqu		0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor		$Zlo,$Xi,$Xi		# collect result
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vxorps		$Zhi,$Xo,$Xo
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor		$Zmi,$Tred,$Tred
	vxorps		$Ij,$T1,$T1

	vmovdqu		0x50($inp),$Ii		# I[5]
	vpxor		$Xi,$Tred,$Tred		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor		$Xo,$Tred,$Tred
	vpslldq		\$8,$Tred,$T2
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vpsrldq		\$8,$Tred,$Tred
	vpxor		$T2, $Xi, $Xi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb		$bswap,$Ii,$Ii
	vxorps		$Tred,$Xo, $Xo
	vpxor		$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0x50-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		0x40($inp),$Ij		# I[4]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 1st phase
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vxorps		$Ij,$T1,$T1
	vpxor		$Zmi,$Xmi,$Xmi

	vmovdqu		0x30($inp),$Ii		# I[3]
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0x80-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		0x20($inp),$Ij		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpxor		$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor		$Ij,$T1,$T1
	vpxor		$Zmi,$Xmi,$Xmi
	vxorps		$Tred,$Xi,$Xi

	vmovdqu		0x10($inp),$Ii		# I[1]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 2nd phase
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb		$bswap,$Ii,$Ii
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vxorps		$Xo,$Tred,$Tred
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu		0xb0-0x40($Htbl),$HK
	vpxor		$Ii,$T2,$T2
	vpxor		$Xmi,$Zmi,$Zmi

	vmovdqu		($inp),$Ij		# I[0]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb		$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu		0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor		$Tred,$Ij,$Ij
	vpclmulqdq	\$0x10,$HK, $T2,$Xmi
	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi

	lea		0x80($inp),$inp
	sub		\$0x80,$len
	jnc		.Loop8x_avx

	add		\$0x80,$len
	jmp		.Ltail_no_xor_avx

.align	32
.Lshort_avx:
	vmovdqu		-0x10($inp,$len),$Ii	# very last word
	lea		($inp,$len),$inp
	vmovdqu		0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vmovdqu		0x20-0x40($Htbl),$HK
	vpshufb		$bswap,$Ii,$Ij

	vmovdqa		$Xlo,$Zlo		# subtle way to zero $Zlo,
	vmovdqa		$Xhi,$Zhi		# $Zhi and
	vmovdqa		$Xmi,$Zmi		# $Zmi
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x20($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x30($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu		0x50-0x40($Htbl),$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x40($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x50($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu		0x80-0x40($Htbl),$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x60($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vpsrldq		\$8,$HK,$HK
	sub		\$0x10,$len
	jz		.Ltail_avx

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vmovdqu		-0x70($inp),$Ii
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu		0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpshufb		$bswap,$Ii,$Ij
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovq		0xb8-0x40($Htbl),$HK
	sub		\$0x10,$len
	jmp		.Ltail_avx

.align	32
.Ltail_avx:
	vpxor		$Xi,$Ij,$Ij		# accumulate $Xi
.Ltail_no_xor_avx:
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor		$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor		$Ij,$T1,$T1
	vpxor		$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vpxor		$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vmovdqu		(%r10),$Tred

	vpxor		$Xlo,$Zlo,$Xi
	vpxor		$Xhi,$Zhi,$Xo
	vpxor		$Xmi,$Zmi,$Zmi

	vpxor		$Xi, $Zmi,$Zmi		# aggregated Karatsuba post-processing
	vpxor		$Xo, $Zmi,$Zmi
	vpslldq		\$8, $Zmi,$T2
	vpsrldq		\$8, $Zmi,$Zmi
	vpxor		$T2, $Xi, $Xi
	vpxor		$Zmi,$Xo, $Xo

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 1st phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor		$T2,$Xi,$Xi

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 2nd phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpxor		$Xo,$Xi,$Xi
	vpxor		$T2,$Xi,$Xi

	cmp		\$0,$len
	jne		.Lshort_avx

	vpshufb		$bswap,$Xi,$Xi
	vmovdqu		$Xi,($Xip)
	vzeroupper
___
$code.=<<___ if ($win64);
	movaps	(%rsp),%xmm6
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
	lea	0xa8(%rsp),%rsp
.LSEH_end_gcm_ghash_avx:
___
$code.=<<___;
	ret
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
} else {
$code.=<<___;
	jmp	.L_ghash_clmul
.size	gcm_ghash_avx,.-gcm_ghash_avx
___
}

$code.=<<___;
.align	64
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
.L7_mask:
	.long	7,0,7,0
.L7_mask_poly:
	.long	7,0,`0xE1<<1`,0
.align	64
.type	.Lrem_4bit,\@object
.Lrem_4bit:
	.long	0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
	.long	0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
	.long	0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
	.long	0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type	.Lrem_8bit,\@object
.Lrem_8bit:
	.value	0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
	.value	0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
	.value	0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
	.value	0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
	.value	0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
	.value	0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
	.value	0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
	.value	0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
	.value	0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
	.value	0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
	.value	0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
	.value	0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
	.value	0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
	.value	0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
	.value	0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
	.value	0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
	.value	0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
	.value	0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
	.value	0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
	.value	0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
	.value	0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
	.value	0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
	.value	0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
	.value	0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
	.value	0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
	.value	0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
	.value	0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
	.value	0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
	.value	0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
	.value	0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
	.value	0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE

.asciz	"GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

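# The .Lrem_4bit and .Lrem_8bit constants above are the reductions of
# the 4 or 8 bits that the 4-bit code shifts out of the low end of Z.
# They can be regenerated with a sketch along these lines (illustration
# only, not used by this file; _rem_8bit is a made-up name):
#
#	sub _rem_8bit {
#	my $i = shift; my $r = 0;
#	    $r ^= 0x1c2<<$_ for grep { ($i>>$_)&1 } 0..7;  # 0x1c2 = 0xE1<<1
#	    $r;
#	}
#
# .Lrem_8bit entry i is then _rem_8bit(i), and .Lrem_4bit entry i is
# _rem_8bit(i<<4), stored pre-shifted into the top 16 bits of the
# 64-bit entry; the 16-bit .Lrem_8bit values get the corresponding
# "shl \$48" at run time instead.
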
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
.align	16
se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_prologue

	lea	48+280(%rax),%rax	# adjust "rsp"

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	se_handler,.-se_handler

.section	.pdata
.align	4
	.rva	.LSEH_begin_gcm_gmult_4bit
	.rva	.LSEH_end_gcm_gmult_4bit
	.rva	.LSEH_info_gcm_gmult_4bit

	.rva	.LSEH_begin_gcm_ghash_4bit
	.rva	.LSEH_end_gcm_ghash_4bit
	.rva	.LSEH_info_gcm_ghash_4bit

	.rva	.LSEH_begin_gcm_init_clmul
	.rva	.LSEH_end_gcm_init_clmul
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_clmul
	.rva	.LSEH_end_gcm_ghash_clmul
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_gcm_init_avx
	.rva	.LSEH_end_gcm_init_avx
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_avx
	.rva	.LSEH_end_gcm_ghash_avx
	.rva	.LSEH_info_gcm_ghash_clmul
___
$code.=<<___;
.section	.xdata
.align	8
.LSEH_info_gcm_gmult_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lgmult_prologue,.Lgmult_epilogue	# HandlerData
.LSEH_info_gcm_ghash_4bit:
	.byte	9,0,0,0
	.rva	se_handler
	.rva	.Lghash_prologue,.Lghash_epilogue	# HandlerData
.LSEH_info_gcm_init_clmul:
	.byte	0x01,0x08,0x03,0x00
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x22,0x00,0x00	#sub	rsp,0x18
.LSEH_info_gcm_ghash_clmul:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps 0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps 0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps 0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps 0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps 0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps 0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps 0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps 0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps 0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps 0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8
___
}

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;