Searched refs:v12 (Results 1 - 25 of 37) sorted by path

12

/linux-master/arch/arm64/crypto/
H A Daes-ce-ccm-core.S38 dround \va, \vb, v12
H A Daes-neon.S45 movi v12.16b, #0x1b
84 mul_by_x2 v8.16b, \in\().16b, v9.16b, v12.16b
90 mul_by_x v9.16b, \in\().16b, v8.16b, v12.16b
183 mul_by_x2_2x v8, v9, \in0, \in1, v10, v11, v12
192 mul_by_x_2x v8, v9, \in0, \in1, v10, v11, v12
H A Daes-neonbs-core.S392 movi v12.16b, #0x10
409 cmtst v4.16b, v7.16b, v12.16b
440 eor v12.16b, v2.16b, v9.16b
443 tbl v2.16b, {v12.16b}, v8.16b
462 sbox v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, \
469 mix_cols v0, v1, v4, v6, v3, v7, v2, v5, v8, v9, v10, v11, v12, \
483 eor v0.16b, v0.16b, v12.16b
484 eor v1.16b, v1.16b, v12.16b
485 eor v4.16b, v4.16b, v12.16b
486 eor v6.16b, v6.16b, v12
[all...]
H A Dchacha-neon-core.S37 * Clobbers: w3, x10, v4, v12
42 ld1 {v12.4s}, [x10]
59 tbl v3.16b, {v3.16b}, v12.16b
88 tbl v3.16b, {v3.16b}, v12.16b
218 ld4r {v12.4s-v15.4s}, [x8]
232 mov a12, v12.s[0]
238 add v12.4s, v12.4s, v30.4s
254 eor v12.16b, v12
[all...]
H A Dcrct10dif-ce-core.S225 CPU_LE( rev64 v12.16b, v12.16b )
231 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
236 eor \reg2\().16b, \reg2\().16b, v12.16b
H A Dghash-ce-core.S27 t5 .req v12
51 XM3 .req v12
376 K4 .req v12
H A Dnh-neon-core.S30 T4 .req v12
H A Dpolyval-ce-core.S50 KEY4 .req v12
H A Dsha1-ce-core.S29 dg0v .req v12
H A Dsha2-ce-core.S84 ld1 {v12.4s-v15.4s}, [x8]
119 add_update 1, v12, 19, 16, 17, 18
H A Dsha3-ce-core.S49 ld1 {v12.1d-v15.1d}, [x8], #32
78 eor v12.8b, v12.8b, v30.8b
113 eor3 v27.16b, v2.16b, v7.16b, v12.16b
134 xar v2.2d, v12.2d, v26.2d, (64 - 43)
135 xar v12.2d, v13.2d, v27.2d, (64 - 25)
166 bcax v10.16b, v29.16b, v12.16b, v26.16b
167 bcax v11.16b, v26.16b, v13.16b, v12.16b
168 bcax v12.16b, v12
[all...]
H A Dsha512-ce-core.S118 0: ld1 {v12.2d-v15.2d}, [x1], #64
122 CPU_LE( rev64 v12.16b, v12.16b )
H A Dsm3-ce-core.S62 round \ab, \s0, v11, v12, 0
63 round \ab, \s0, v12, v11, 1
64 round \ab, \s0, v11, v12, 2
65 round \ab, \s0, v12, v11, 3
H A Dsm4-ce-core.S237 rev32 v12.16b, v4.16b
242 SM4_CRYPT_BLK8_BE(v8, v9, v10, v11, v12, v13, v14, v15)
248 eor v12.16b, v12.16b, v3.16b
254 st1 {v12.16b-v15.16b}, [x1], #64
442 ld1 {v12.16b-v15.16b}, [x2], #64
450 eor v4.16b, v4.16b, v12.16b
560 tweak_next(v12, v11, RTMP3)
561 tweak_next(v13, v12, RTMP0)
571 eor v4.16b, v4.16b, v12
[all...]
H A Dsm4-ce-gcm-core.S251 #define RTMP4 v12
573 #define RTMP0 v12
H A Dsm4-neon-core.S21 #define RTMP4 v12
26 #define RX0 v12
/linux-master/arch/powerpc/crypto/
H A Daes-gcm-p10.S616 # load 2 more round keys (v11, v12)
623 # load 2 more round keys (v11, v12, v13, v14)
1157 # load 2 more round keys (v11, v12)
1164 # load 2 more round keys (v11, v12, v13, v14)
H A Dchacha-p10le-8x.S38 # Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
39 # Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
195 # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
292 # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
395 # QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
437 # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
H A Dcrc32-vpmsum_core.S223 VPMSUMD(v12,v20,const1)
281 vxor v4,v4,v12
282 VPMSUMD(v12,v20,const1)
329 vxor v4,v4,v12
330 VPMSUMD(v12,v20,const1)
351 vxor v4,v4,v12
382 lvx v12,off64,r4
384 VPERM(v12,v12,v12,byteswa
[all...]
/linux-master/arch/powerpc/include/asm/
H A Dppc_asm.h689 #define v12 12 macro
/linux-master/arch/powerpc/lib/
H A Dcopyuser_power7.S594 VPERM(v12,v4,v3,v16)
606 err4; stvx v12,r3,r12
H A Dmemcpy_power7.S541 VPERM(v12,v4,v3,v16)
553 stvx v12,r3,r12
/linux-master/arch/riscv/crypto/
H A Daes-macros.S81 vle32.v v12, (\keyp)
111 vaesem.vs \data, v12
115 vaesem.vs \data, v12
128 vaesdm.vs \data, v12
134 vaesdm.vs \data, v12
H A Daes-riscv64-zvkned-zvbb-zvkg.S100 // Save a copy of T bit-reversed in v12.
101 vbrev8.v v12, TWEAKS
133 vaesz.vs TWEAKS_BREV, v12
H A Dchacha-riscv64-zvkb.S196 vid.v v12
197 vadd.vx v12, v12, COUNTER
210 chacha_round v0, v4, v8, v12, v1, v5, v9, v13, \
213 chacha_round v0, v5, v10, v15, v1, v6, v11, v12, \
251 vadd.vx v12, v12, COUNTER
255 vadd.vv v12, v12, v0
263 vxor.vv v28, v28, v12
[all...]

Completed in 350 milliseconds

12