/freebsd-13-stable/crypto/openssl/crypto/poly1305/asm/

  poly1305-c64xplus.pl
      33  ($H0,$H1,$H2,$H3,$H4,$H4a)=("A8","B8","A10","B10","B2",$LEN);
     143  LDW *${CTXA}[4],$H4 ; load h4
     171  || ADD $PADBIT,$H4,$H4 ; h4+=padbit
     174  || ADD $D3,$H4,$H4
     192  MPY32 $H4,$S1,B20
     193  || MPY32 $H4,$S2a,A20
     199  MPY32 $H4,$S3b,B22
     206  MPY32 $H4, [all...]

  poly1305-x86_64.pl
     342  my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
     637  vmovd $h2#d,$H4
     740  vmovd $h2#d,$H4
     774  vmovd 4*4($ctx),$H4
     922  vmovdqa $H4,0x40(%r11) #
     927  vmovdqa 0x40(%rsp),$H4 # s2^2
     934  vpmuludq $T4,$H4,$H0 # h4*s2
     935  vpmuludq $T3,$H4,$H4 # h3*s2
     938  vpaddq $H4, [all...]

  poly1305-armv4.pl
     449  my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
     536  @ H0>>+H1>>+H2>>+H3>>+H4
     537  @ H3>>+H4>>*5+H0>>+H1
     550  @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
     557  @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
     567  @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
     569  @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
     575  @ one has to watch for H2 (which is narrower than H0) and 5*H4
     753  vmov.32 $H4#l [all...]

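The comment block quoted at lines 536-575 reasons about one radix-2^26 multiply-reduce step. As a reference point, here is that step in plain scalar C (a sketch, not the NEON code: it assumes fully carried 26-bit input limbs, whereas the assembly deliberately tolerates the wider "lazy" limbs the comment analyzes; all names are illustrative):

    #include <stdint.h>

    /* h = (h * r) mod 2^130-5, limbs in radix 2^26. Because
     * 2^130 == 5 (mod 2^130-5), limb products that would land at
     * 2^130 and above fold back multiplied by 5 -- the 5*r terms. */
    static void
    poly1305_mul26(uint32_t h[5], const uint32_t r[5])
    {
        uint64_t d0, d1, d2, d3, d4, c;

        d0 = (uint64_t)h[0]*r[0]     + (uint64_t)h[1]*(5*r[4]) +
             (uint64_t)h[2]*(5*r[3]) + (uint64_t)h[3]*(5*r[2]) +
             (uint64_t)h[4]*(5*r[1]);
        d1 = (uint64_t)h[0]*r[1]     + (uint64_t)h[1]*r[0] +
             (uint64_t)h[2]*(5*r[4]) + (uint64_t)h[3]*(5*r[3]) +
             (uint64_t)h[4]*(5*r[2]);
        d2 = (uint64_t)h[0]*r[2]     + (uint64_t)h[1]*r[1] +
             (uint64_t)h[2]*r[0]     + (uint64_t)h[3]*(5*r[4]) +
             (uint64_t)h[4]*(5*r[3]);
        d3 = (uint64_t)h[0]*r[3]     + (uint64_t)h[1]*r[2] +
             (uint64_t)h[2]*r[1]     + (uint64_t)h[3]*r[0] +
             (uint64_t)h[4]*(5*r[4]);
        /* the row quoted above: H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0*R4 */
        d4 = (uint64_t)h[0]*r[4]     + (uint64_t)h[1]*r[3] +
             (uint64_t)h[2]*r[2]     + (uint64_t)h[3]*r[1] +
             (uint64_t)h[4]*r[0];

        /* carry chain; overflow out of limb 4 re-enters limb 0 times 5 */
        c = d0 >> 26; h[0] = d0 & 0x3ffffff; d1 += c;
        c = d1 >> 26; h[1] = d1 & 0x3ffffff; d2 += c;
        c = d2 >> 26; h[2] = d2 & 0x3ffffff; d3 += c;
        c = d3 >> 26; h[3] = d3 & 0x3ffffff; d4 += c;
        c = d4 >> 26; h[4] = d4 & 0x3ffffff;
        h[0] += (uint32_t)c * 5;
        h[1] += h[0] >> 26; h[0] &= 0x3ffffff;
    }

With 26-bit h limbs and clamped r, every d row stays below 2^58, comfortably inside a 64-bit accumulator; the 57- and 59-bit figures in the quoted comment are the analogous bounds for the NEON code's wider lazy limbs.
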
  poly1305-armv8.pl
     218  my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28));
     409  fmov ${H4},x14
     458  fmov ${H4},x14
     660  add $IN01_4,$IN01_4,$H4
     700  xtn $H4,$ACC4
     703  bic $H4,#0xfc,lsl#24
     721  add $H4,$H4,$T1.2s // h3 -> h4
     739  add $IN23_4,$IN01_4,$H4
     802  add $IN01_4,$IN01_4,$H4 [all...]

/freebsd-13-stable/sys/contrib/libsodium/src/libsodium/crypto_onetimeauth/poly1305/sse2/

  poly1305_sse2.c
     206  xmmi H0, H1, H2, H3, H4;  local
     233  H4 = _mm_srli_epi64(T6, 40);
     234  H4 = _mm_or_si128(H4, HIBIT);
     246  H4 = _mm_shuffle_epi32(T2, _MM_SHUFFLE(1, 1, 0, 0));
     309  T0 = H4;
     314  T1 = H4;
     318  T2 = H4;
     324  T3 = H4;
     338  T4 = H4; [all...]

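Lines 233-234 are where the SSE2 code ORs in HIBIT, the 2^128 pad bit, after shifting out the top limb. The scalar shape of that block-to-limb split looks roughly like this (a sketch with illustrative names; le32dec_ stands in for whatever little-endian load the surrounding code uses):

    #include <stdint.h>

    static uint32_t
    le32dec_(const unsigned char *p)    /* portable little-endian load */
    {
        return (uint32_t)p[0]       | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    /* Split one full 16-byte block into five 26-bit limbs t[0..4] and
     * set the 2^128 bit (bit 24 of limb 4) -- the HIBIT of the hits
     * above. The limbs are then added into h before the multiply. */
    static void
    poly1305_split26(uint32_t t[5], const unsigned char m[16])
    {
        uint32_t t0 = le32dec_(m + 0), t1 = le32dec_(m + 4);
        uint32_t t2 = le32dec_(m + 8), t3 = le32dec_(m + 12);

        t[0] = t0 & 0x3ffffff;
        t[1] = ((t0 >> 26) | (t1 <<  6)) & 0x3ffffff;
        t[2] = ((t1 >> 20) | (t2 << 12)) & 0x3ffffff;
        t[3] = ((t2 >> 14) | (t3 << 18)) & 0x3ffffff;
        t[4] = (t3 >> 8) | (1u << 24);   /* 2^128 pad bit */
    }
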
/freebsd-13-stable/sys/crypto/aesni/

  aesni_ghash.c
     163  reduce4(__m128i H1, __m128i H2, __m128i H3, __m128i H4,  argument
     176  H4_X4_lo = _mm_clmulepi64_si128(H4, X4, 0x00);
     185  H4_X4_hi = _mm_clmulepi64_si128(H4, X4, 0x11);
     203  tmp3 = _mm_shuffle_epi32(H4, 78);
     205  tmp3 = _mm_xor_si128(tmp3, H4);
     279  __m128i H, H2, H3, H4, Y, T;  local
     349  gfmul(H,H3,&H4);
     363  reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
     475  reduce4(H, H2, H3, H4, tmp4, tmp3, tmp2, tmp1, &X);
     478  reduce4(H, H2, H3, H4, tmp
     539  __m128i H, H2, H3, H4, Y, T;  local [all...]

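reduce4() is the usual 4-block GHASH aggregation: the driver precomputes H^2..H^4 (the gfmul(H,H3,&H4) hit) and then folds four ciphertext blocks per reduction. The identity it exploits, written out using the file's gfmul(a, b, res) multiply-reduce (signature taken from the hits; the rest is a sketch that performs four separate reductions where reduce4 shares a single one):

    #include <emmintrin.h>
    #include <wmmintrin.h>          /* PCLMUL intrinsics */

    /* multiply-reduce in GF(2^128); same shape as the gfmul() in
     * aesni_ghash.c, redeclared here only for the sketch */
    void gfmul(__m128i a, __m128i b, __m128i *res);

    /* Four GHASH steps at once:
     *   Y' = (Y^C0)*H^4 ^ C1*H^3 ^ C2*H^2 ^ C3*H
     * XOR distributes over the reduced products, so accumulating the
     * four unreduced products and reducing once -- what reduce4()
     * does -- yields the same Y'. */
    static __m128i
    ghash4_sketch(__m128i Y, const __m128i C[4],
                  __m128i H, __m128i H2, __m128i H3, __m128i H4)
    {
        __m128i t0, t1, t2, t3;

        gfmul(_mm_xor_si128(Y, C[0]), H4, &t0);
        gfmul(C[1], H3, &t1);
        gfmul(C[2], H2, &t2);
        gfmul(C[3], H,  &t3);
        return _mm_xor_si128(_mm_xor_si128(t0, t1),
                             _mm_xor_si128(t2, t3));
    }
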
/freebsd-13-stable/crypto/openssl/crypto/modes/asm/

  ghashp8-ppc.pl
     377  $Xh3,$Xm3,$IN3,$H4l,$H4,$H4h) = map("v$_",(20..31));
     431  lvx_u $H4, r9,$Htbl
     488  vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
     528  vpmsumd $Xm,$Xh,$H4 # H^4.hi·Xi.lo+H^4.lo·Xi.hi
     568  vmr $H4, $H3
     591  vmr $H4, $H2
     605  vmr $H4, $H

  ghashv8-armx.pl
     412  $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
     422  vld1.64 {$H3-$H4},[$Htbl] @ load twisted H^3, ..., H^4
     477  vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
     479  vpmull2.p64 $Xh,$H4,$IN
     532  vpmull.p64 $Xl,$H4,$IN @ H^4·(Xi+Ii)
     534  vpmull2.p64 $Xh,$H4,$IN

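Both ghash modules assemble the 128x128-bit carry-less product from 64-bit halves; the $Xm lines are the cross term H^4.hi·Xi.lo + H^4.lo·Xi.hi of that split. In scalar form (a sketch: clmul64 is a hypothetical stand-in for vpmsumd/PMULL, and the 256-bit result still needs the GHASH polynomial reduction):

    #include <stdint.h>

    typedef struct { uint64_t lo, hi; } u128;

    /* hypothetical 64x64 -> 128 carry-less multiply (vpmsumd / PMULL) */
    u128 clmul64(uint64_t a, uint64_t b);

    /* x*h over GF(2) from four 64-bit products; xm is exactly the
     * "hi·lo + lo·hi" middle term quoted in the comments above. */
    static void
    clmul128(u128 x, u128 h, u128 *lo, u128 *hi)
    {
        u128 xl  = clmul64(x.lo, h.lo);   /* low  product */
        u128 xh  = clmul64(x.hi, h.hi);   /* high product */
        u128 xm  = clmul64(x.lo, h.hi);   /* cross terms  */
        u128 xm2 = clmul64(x.hi, h.lo);

        xm.lo ^= xm2.lo;
        xm.hi ^= xm2.hi;

        /* fold the middle term across the 64-bit boundary */
        lo->lo = xl.lo;
        lo->hi = xl.hi ^ xm.lo;
        hi->lo = xh.lo ^ xm.hi;
        hi->hi = xh.hi;
    }

On POWER8, vpmsumd XORs two 64x64 carry-less products in a single instruction, which is why the whole cross term is one opcode in the ghashp8-ppc.pl hits above.
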
/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AArch64/

  AArch64CallingConvention.cpp
      27  AArch64::H3, AArch64::H4, AArch64::H5,

  AArch64FastISel.cpp
    3009  { AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,

/freebsd-13-stable/sys/crypto/openssl/arm/

  poly1305-armv4.S
     467  @ H0>>+H1>>+H2>>+H3>>+H4
     468  @ H3>>+H4>>*5+H0>>+H1
     481  @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
     488  @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
     498  @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
     500  @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
     506  @ one has to watch for H2 (which is narrower than H0) and 5*H4

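This is the generated counterpart of poly1305-armv4.pl above, so the scalar sketch after that entry applies here as well. The vmlal.u32 the comment leans on is NEON's widening multiply-accumulate; in intrinsic form (a minimal illustration, names assumed):

    #include <arm_neon.h>

    /* vmlal.u32: acc += (uint64)h * r, per 64-bit lane. The 64-bit
     * accumulators are what let the 57..59-bit sums in the comment
     * build up without intermediate carries. */
    static uint64x2_t
    mla_row(uint64x2_t acc, uint32x2_t h, uint32x2_t r)
    {
        return vmlal_u32(acc, h, r);
    }
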
/freebsd-13-stable/crypto/openssh/openbsd-compat/

  rmd160.c
      61  #define H4 0xC3D2E1F0U  macro
     106  ctx->state[4] = H4;

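In both rmd160.c copies, H4 is not a register but the fifth word of the RIPEMD-160 initial chaining value (the constants coincide with SHA-1's IV). Condensed context for the two hits (struct layout is illustrative):

    #include <stdint.h>

    /* standard RIPEMD-160 initial state */
    #define H0 0x67452301U
    #define H1 0xEFCDAB89U
    #define H2 0x98BADCFEU
    #define H3 0x10325476U
    #define H4 0xC3D2E1F0U

    struct rmd160_ctx { uint32_t state[5]; /* ... */ };

    static void
    rmd160_init(struct rmd160_ctx *ctx)
    {
        ctx->state[0] = H0;
        ctx->state[1] = H1;
        ctx->state[2] = H2;
        ctx->state[3] = H3;
        ctx->state[4] = H4;   /* the hit at line 106 */
    }
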
/freebsd-13-stable/sys/opencrypto/

  rmd160.c
      60  #define H4 0xC3D2E1F0U  macro
     105  ctx->state[4] = H4;

/freebsd-13-stable/contrib/llvm-project/llvm/lib/Target/AArch64/Disassembler/

  AArch64Disassembler.cpp
     379  AArch64::H0, AArch64::H1, AArch64::H2, AArch64::H3, AArch64::H4,