Searched refs:rem_4bit (Results 1 - 18 of 18) sorted by relevance

/netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/openssl/crypto/modes/asm/
ghash-s390x.pl
71 $rem_4bit="%r14";
109 larl $rem_4bit,rem_4bit
148 larl $rem_4bit,rem_4bit
197 xg $Zhi,0($rem0,$rem_4bit)
210 xg $Zhi,0($rem1,$rem_4bit)
222 xg $Zhi,0($rem0,$rem_4bit)
232 xg $Zhi,0($rem1,$rem_4bit)
234 lg $tmp,0($xi,$rem_4bit)
248 rem_4bit: label
[all...]
ghash-parisc.pl
62 $rem_4bit="%r28";
107 blr %r0,$rem_4bit
110 andcm $rem_4bit,$rem,$rem_4bit
112 ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
146 ldd $rem($rem_4bit),$rem
162 ldd $rem($rem_4bit),$rem
175 ldd $rem($rem_4bit),
[all...]
ghash-sparcv9.pl
68 $rem_4bit="%l4";
86 rem_4bit: label
91 .type rem_4bit,#object
92 .size rem_4bit,(.-rem_4bit)
105 add %o7,rem_4bit-1b,$rem_4bit
121 ldx [$rem_4bit+$remi],$rem
143 ldx [$rem_4bit+$remi],$rem
158 ldx [$rem_4bit
[all...]
ghash-armv4.pl
95 $rem_4bit=$inp; # used in gcm_gmult_4bit
134 .type rem_4bit,%object
136 rem_4bit: label
141 .size rem_4bit,.-rem_4bit
145 sub $rem_4bit,pc,#8
146 sub $rem_4bit,$rem_4bit,#32 @ &rem_4bit
157 sub r12,r12,#48 @ &rem_4bit
[all...]
ghash-alpha.pl
45 $rem_4bit="AT"; # $28
69 s8addq $remp,$rem_4bit,$remp
97 s8addq $remp,$rem_4bit,$remp
115 s8addq $remp,$rem_4bit,$remp
139 s8addq $remp,$rem_4bit,$remp
156 s8addq $remp,$rem_4bit,$remp
181 s8addq $remp,$rem_4bit,$remp
199 s8addq $remp,$rem_4bit,$remp
222 s8addq $remp,$rem_4bit,$remp
235 s8addq $remp,$rem_4bit,
448 rem_4bit: label
[all...]
ghash-x86.pl
347 &static_label("rem_4bit");
351 $S=12; # shift factor for rem_4bit
359 # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
362 # Reference to rem_4bit is scheduled so late that I had to >>4
363 # rem_4bit elements. This resulted in 20-45% procent improvement
367 my $rem_4bit = "eax";
396 &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
406 &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem]
413 &shl ($inp,4); # compensate for rem_4bit[
[all...]
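
Every generator above emits the same 16-entry rem_4bit table, and the ghash-x86.pl comment quoted above names its use, Z.hi ^= rem_4bit[Z.lo&0xf], as the critical path. Each entry is the pre-computed reduction for one possible value of the low nibble that a 4-bit right shift of the reflected 128-bit accumulator drops. Below is a minimal C sketch of how those constants can be derived, assuming the standard GHASH reduction byte 0xE1; the identifiers (rem_4bit16, the derivation loop) are illustrative and not part of OpenSSL.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Sketch: derive the 16 "rem_4bit" reduction constants used by the
     * table-driven GHASH code.  Dropping the low nibble of the reflected
     * 128-bit accumulator is a multiplication by x^4; each dropped bit j
     * folds back in as the reduction byte 0xE1 (x^7+x^2+x+1, reflected),
     * shifted (3 - j) positions further down within a 16-bit word.
     * Names here (rem_4bit16) are illustrative only.
     */
    int main(void)
    {
        uint16_t rem_4bit16[16];
        int n, j;

        for (n = 0; n < 16; n++) {
            uint16_t v = 0;
            for (j = 0; j < 4; j++)
                if (n & (1 << j))
                    v ^= (uint16_t)(0xE100 >> (3 - j));
            rem_4bit16[n] = v;          /* e.g. n=1 -> 0x1C20, n=8 -> 0xE100 */
        }

        /* gcm128.c packs these into the top 16 bits of a size_t; the
         * assembly files above emit them as 64-bit table entries. */
        for (n = 0; n < 16; n++)
            printf("rem_4bit[%2d] = 0x%04X\n", n, (unsigned)rem_4bit16[n]);
        return 0;
    }

The ">>4" remark in the same ghash-x86.pl comment appears to describe storing these elements pre-shifted so a late-computed, still-scaled index can address them directly; apart from that scaling the constants are the same.
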
ghash-ia64.pl
53 # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
132 add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
414 .type rem_4bit#,\@object
415 rem_4bit: label
420 .size rem_4bit#,128
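
The ghash-ia64.pl hit spells out why the table's placement matters: 16 entries of 8 bytes each give exactly the 128 bytes declared by .size rem_4bit#,128, and with the table also aligned at 128 the scaled nibble offset (at most 15*8 = 120) fits entirely in the base address's zero low bits, so it can be deposited or OR-ed into the address rather than added. A small C illustration of that property follows; load_entry is a made-up name, and the deposit/OR reading of the comment is an inference rather than a statement about the exact ia64 instructions used.

    #include <assert.h>
    #include <stdint.h>
    #include <stdalign.h>   /* C11 alignas */

    /* Illustration only: with the 16 x 8-byte table aligned to 128 bytes,
     * the scaled nibble offset (0..120) occupies only the low 7 bits of
     * the address, which are all zero in the base, so merging it with a
     * logical OR yields the same address as an ordinary add. */
    static alignas(128) uint64_t rem_4bit[16];

    uint64_t load_entry(unsigned nibble)
    {
        uintptr_t base = (uintptr_t)rem_4bit;
        uintptr_t off  = (uintptr_t)(nibble & 0xf) << 3;   /* 0..120 */

        assert((base & 127) == 0);              /* guaranteed by alignas */
        assert((base | off) == (base + off));   /* why "aligned at 128" matters */
        return *(const uint64_t *)(base | off);
    }

    int main(void)
    {
        /* table is all zeros here; the addressing identity is the point */
        return (int)load_entry(5);
    }
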
ghash-x86_64.pl
123 $rem_4bit = "%r11";
174 xor ($rem_4bit,$rem,8),$Zhi
189 xor ($rem_4bit,$rem,8),$Zhi
204 xor ($rem_4bit,$rem,8),$Zhi
216 xor ($rem_4bit,$rem,8),$Zhi
237 lea .Lrem_4bit(%rip),$rem_4bit
255 $rem_8bit=$rem_4bit;
/netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/openssl-1.0.2h/crypto/modes/asm/
ghash-s390x.pl
71 $rem_4bit="%r14";
109 larl $rem_4bit,rem_4bit
148 larl $rem_4bit,rem_4bit
197 xg $Zhi,0($rem0,$rem_4bit)
210 xg $Zhi,0($rem1,$rem_4bit)
222 xg $Zhi,0($rem0,$rem_4bit)
232 xg $Zhi,0($rem1,$rem_4bit)
234 lg $tmp,0($xi,$rem_4bit)
248 rem_4bit: label
[all...]
ghash-parisc.pl
62 $rem_4bit="%r28";
107 blr %r0,$rem_4bit
110 andcm $rem_4bit,$rem,$rem_4bit
112 ldo L\$rem_4bit-L\$pic_gmult($rem_4bit),$rem_4bit
146 ldd $rem($rem_4bit),$rem
162 ldd $rem($rem_4bit),$rem
175 ldd $rem($rem_4bit),
[all...]
ghash-sparcv9.pl
68 $rem_4bit="%l4";
86 rem_4bit: label
91 .type rem_4bit,#object
92 .size rem_4bit,(.-rem_4bit)
105 add %o7,rem_4bit-1b,$rem_4bit
121 ldx [$rem_4bit+$remi],$rem
143 ldx [$rem_4bit+$remi],$rem
158 ldx [$rem_4bit
[all...]
ghash-armv4.pl
95 $rem_4bit=$inp; # used in gcm_gmult_4bit
134 .type rem_4bit,%object
136 rem_4bit: label
141 .size rem_4bit,.-rem_4bit
145 sub $rem_4bit,pc,#8
146 sub $rem_4bit,$rem_4bit,#32 @ &rem_4bit
157 sub r12,r12,#48 @ &rem_4bit
[all...]
ghash-alpha.pl
45 $rem_4bit="AT"; # $28
69 s8addq $remp,$rem_4bit,$remp
97 s8addq $remp,$rem_4bit,$remp
115 s8addq $remp,$rem_4bit,$remp
139 s8addq $remp,$rem_4bit,$remp
156 s8addq $remp,$rem_4bit,$remp
181 s8addq $remp,$rem_4bit,$remp
199 s8addq $remp,$rem_4bit,$remp
222 s8addq $remp,$rem_4bit,$remp
235 s8addq $remp,$rem_4bit,
448 rem_4bit: label
[all...]
ghash-x86.pl
347 &static_label("rem_4bit");
351 $S=12; # shift factor for rem_4bit
359 # used to optimize critical path in 'Z.hi ^= rem_4bit[Z.lo&0xf]'.
362 # Reference to rem_4bit is scheduled so late that I had to >>4
363 # rem_4bit elements. This resulted in 20-45% procent improvement
367 my $rem_4bit = "eax";
396 &pxor ($Zhi,&QWP(0,$rem_4bit,$rem[1],8)) if ($cnt<28);
406 &mov ($inp,&DWP(4,$rem_4bit,$rem[1],8)); # last rem_4bit[rem]
413 &shl ($inp,4); # compensate for rem_4bit[
[all...]
ghash-ia64.pl
53 # &rem_4bit[Zlo&0xf]. It works, because rem_4bit is aligned at 128
132 add rem_4bitp=rem_4bit#-gcm_gmult_4bit#,rem_4bitp
414 .type rem_4bit#,\@object
415 rem_4bit: label
420 .size rem_4bit#,128
ghash-x86_64.pl
123 $rem_4bit = "%r11";
174 xor ($rem_4bit,$rem,8),$Zhi
189 xor ($rem_4bit,$rem,8),$Zhi
204 xor ($rem_4bit,$rem,8),$Zhi
216 xor ($rem_4bit,$rem,8),$Zhi
237 lea .Lrem_4bit(%rip),$rem_4bit
255 $rem_8bit=$rem_4bit;
/netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/openssl/crypto/modes/
gcm128.c
344 static const size_t rem_4bit[16] = { variable
375 Z.hi ^= rem_4bit[rem];
377 Z.hi ^= (u64)rem_4bit[rem] << 32;
393 Z.hi ^= rem_4bit[rem];
395 Z.hi ^= (u64)rem_4bit[rem] << 32;
460 Z.hi ^= rem_4bit[rem];
462 Z.hi ^= (u64)rem_4bit[rem] << 32;
479 Z.hi ^= rem_4bit[rem];
481 Z.hi ^= (u64)rem_4bit[rem] << 32;
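
The gcm128.c hits are the portable C fallback (gcm_gmult_4bit / gcm_ghash_4bit): each step extracts the low nibble of the 128-bit accumulator, shifts the whole value right by 4, and folds the matching rem_4bit constant back into the high half; the "(u64)rem_4bit[rem] << 32" variants quoted above are the 32-bit size_t branch, where the packed constant sits in a 32-bit word. A rough, self-contained sketch of that inner step for a 64-bit build follows; the u128 struct mirrors gcm128.c's, but shift_reduce_4bit and the toy main are illustrative only, and the Htable lookups that surround this step in the real loop are omitted.

    #include <stdio.h>
    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;    /* mirrors gcm128.c's u128 */

    /* The standard GHASH 4-bit reduction constants (the same values the
     * derivation sketch earlier produces), packed into the top 16 bits of
     * each 64-bit word so they can be XORed into Z.hi with no extra
     * shift, as in the 64-bit branch quoted above. */
    static const uint64_t rem_4bit[16] = {
        0x0000ULL << 48, 0x1C20ULL << 48, 0x3840ULL << 48, 0x2460ULL << 48,
        0x7080ULL << 48, 0x6CA0ULL << 48, 0x48C0ULL << 48, 0x54E0ULL << 48,
        0xE100ULL << 48, 0xFD20ULL << 48, 0xD940ULL << 48, 0xC560ULL << 48,
        0x9180ULL << 48, 0x8DA0ULL << 48, 0xA9C0ULL << 48, 0xB5E0ULL << 48
    };

    /* One shift-and-reduce step of the accumulator, i.e. the context
     * around the "Z.hi ^= rem_4bit[rem]" lines matched above; the real
     * loop interleaves this with XORing in the next Htable entry. */
    static void shift_reduce_4bit(u128 *Z)
    {
        unsigned rem = (unsigned)(Z->lo & 0xf);  /* nibble about to drop out */

        Z->lo = (Z->hi << 60) | (Z->lo >> 4);    /* 128-bit right shift by 4 */
        Z->hi = Z->hi >> 4;
        Z->hi ^= rem_4bit[rem];                  /* fold the dropped nibble back in */
    }

    int main(void)
    {
        u128 Z = { 0, 1 };                       /* toy input: only the low bit set */
        shift_reduce_4bit(&Z);
        printf("Z.hi = %016llx\n", (unsigned long long)Z.hi);  /* 1c20000000000000 */
        return 0;
    }
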
/netgear-R7000-V1.0.7.12_1.2.5/ap/gpl/openssl-1.0.2h/crypto/modes/
gcm128.c
344 static const size_t rem_4bit[16] = { variable
375 Z.hi ^= rem_4bit[rem];
377 Z.hi ^= (u64)rem_4bit[rem] << 32;
393 Z.hi ^= rem_4bit[rem];
395 Z.hi ^= (u64)rem_4bit[rem] << 32;
460 Z.hi ^= rem_4bit[rem];
462 Z.hi ^= (u64)rem_4bit[rem] << 32;
479 Z.hi ^= rem_4bit[rem];
481 Z.hi ^= (u64)rem_4bit[rem] << 32;

Completed in 101 milliseconds