vpaes-armv8.S revision 1.1
.text

.type	_vpaes_consts,%object
.align	7	// totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:	// mc_forward
.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
.quad	0x080B0A0904070605, 0x000302010C0F0E0D
.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
.quad	0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:	// mc_backward
.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
.quad	0x020100030E0D0C0F, 0x0A09080B06050407
.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
.quad	0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:	// sr
.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad	0x030E09040F0A0500, 0x0B06010C07020D08
.quad	0x0F060D040B020900, 0x070E050C030A0108
.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv:	// inv, inva
.quad	0x0E05060F0D080180, 0x040703090A0B0C02
.quad	0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:	// input transform (lo, hi)
.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:	// sbou, sbot
.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:	// sb1u, sb1t
.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:	// sb2u, sb2t
.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt:	// decryption input transform
.quad	0x0F505B040B545F00, 0x154A411E114E451A
.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:	// decryption sbox final output
.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:	// decryption sbox output *9*u, *9*t
.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:	// decryption sbox output *D*u, *D*t
.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:	// decryption sbox output *B*u, *B*t
.quad	0xD022649296B44200, 0x602646F6B0F2D404
.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:	// decryption sbox output *E*u, *E*t
.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd:	// decryption key schedule: invskew x*D
.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:	// decryption key schedule: invskew x*B
.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:	// decryption key schedule: invskew x*E + 0x63
.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:	// decryption key schedule: invskew x*9
.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon:	// rcon
.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt:	// output transform
.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:	// deskew tables: inverts the sbox's "skew"
.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.byte	86,101,99,116,111,114,32,80,101,114,109,117,116,97,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align	2
.size	_vpaes_consts,.-_vpaes_consts
.align	6
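
//
// Note on how the tables above are consumed: every lookup below splits a
// byte vector into low and high nibbles (an AND with 0x0f and a USHR #4)
// and feeds each nibble to a 16-byte tbl, so one tbl acts as a 4-bit to
// 8-bit lookup on all sixteen lanes at once.  .Lk_mc_forward,
// .Lk_mc_backward and .Lk_sr each hold four 16-byte entries; the round
// loops step a pointer through them in 16-byte units and mask it back so
// it wraps modulo 64 bytes, i.e. the entry used depends on the round
// number mod 4.
//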
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type	_vpaes_encrypt_preheat,%function
.align	4
_vpaes_encrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d,v19.2d}, [x10],#32		// .Lk_inv
	ld1	{v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x10]	// .Lk_sb1, .Lk_sb2
	ret
.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
##   %xmm0 = input
##   %xmm9-%xmm15 as in _vpaes_preheat
##   (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type	_vpaes_encrypt_core,%function
.align	4
_vpaes_encrypt_core:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds
	adr	x11, .Lk_mc_forward+16
						// vmovdqa .Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu (%r9), %xmm5		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb $4, %xmm0, %xmm0
	tbl	v1.16b, {v20.16b}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
						// vmovdqa .Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {v21.16b}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	eor	v0.16b, v1.16b, v16.16b		// vpxor %xmm5, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	b	.Lenc_entry

.align 4
.Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {v25.16b}, v2.16b	// vpshufb %xmm2, %xmm13, %xmm4	# 4 = sb1u
	ld1	{v1.2d}, [x11], #16		// vmovdqa -0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b, {v24.16b}, v3.16b	// vpshufb %xmm3, %xmm12, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	tbl	v5.16b, {v27.16b}, v2.16b	// vpshufb %xmm2, %xmm15, %xmm5	# 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v2.16b, {v26.16b}, v3.16b	// vpshufb %xmm3, %xmm14, %xmm2	# 2 = sb2t
	ld1	{v4.2d}, [x10]			// vmovdqa (%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3	# 0 = B
	eor	v2.16b, v2.16b, v5.16b		// vpxor %xmm5, %xmm2, %xmm2	# 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0	# 3 = D
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3	# 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4	# 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b		// vpxor %xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	and	x11, x11, #~(1<<6)		// and $0x30, %r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	sub	w8, w8, #1			// nr--

.Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0	# 1 = i
	tbl	v5.16b, {v19.16b}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm5	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b		// vpxor %xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {v18.16b}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa -0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa -0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4	# 4 = sbou
	ld1	{v1.2d}, [x10]			// vmovdqa 0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm0, %xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core

.globl	vpaes_encrypt
.type	vpaes_encrypt,%function
.align	4
vpaes_encrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [x0]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [x1]

	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_encrypt,.-vpaes_encrypt

.type	_vpaes_encrypt_2x,%function
.align	4
_vpaes_encrypt_2x:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds
	adr	x11, .Lk_mc_forward+16
						// vmovdqa .Lk_ipt(%rip), %xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu (%r9), %xmm5		# round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb $4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v1.16b, {v20.16b}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
	tbl	v9.16b, {v20.16b}, v9.16b
						// vmovdqa .Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {v21.16b}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	tbl	v10.16b, {v21.16b}, v8.16b
	eor	v0.16b, v1.16b, v16.16b		// vpxor %xmm5, %xmm1, %xmm0
	eor	v8.16b, v9.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Lenc_2x_entry

.align 4
.Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {v25.16b}, v2.16b	// vpshufb %xmm2, %xmm13, %xmm4	# 4 = sb1u
	tbl	v12.16b, {v25.16b}, v10.16b
	ld1	{v1.2d}, [x11], #16		// vmovdqa -0x40(%r11,%r10), %xmm1	# .Lk_mc_forward[]
	tbl	v0.16b, {v24.16b}, v3.16b	// vpshufb %xmm3, %xmm12, %xmm0	# 0 = sb1t
	tbl	v8.16b, {v24.16b}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b, {v27.16b}, v2.16b	// vpshufb %xmm2, %xmm15, %xmm5	# 4 = sb2u
	tbl	v13.16b, {v27.16b}, v10.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v2.16b, {v26.16b}, v3.16b	// vpshufb %xmm3, %xmm14, %xmm2	# 2 = sb2t
	tbl	v10.16b, {v26.16b}, v11.16b
	ld1	{v4.2d}, [x10]			// vmovdqa (%r11,%r10), %xmm4	# .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3	# 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b, v2.16b, v5.16b		// vpxor %xmm5, %xmm2, %xmm2	# 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0	# 3 = D
	tbl	v8.16b, {v8.16b}, v4.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3	# 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4	# 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b, v0.16b, v3.16b		// vpxor %xmm3, %xmm0, %xmm0	# 3 = 2A+B+D
	eor	v8.16b, v8.16b, v11.16b
	and	x11, x11, #~(1<<6)		// and $0x30, %r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = 2A+3B+C+D
	eor	v8.16b, v8.16b, v12.16b
	sub	w8, w8, #1			// nr--

.Lenc_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm0, %xmm9, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0	# 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v5.16b, {v19.16b},v1.16b	// vpshufb %xmm1, %xmm11, %xmm5	# 2 = a/k
	tbl	v13.16b, {v19.16b},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {v18.16b},v0.16b	// vpshufb %xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {v18.16b},v8.16b
	tbl	v4.16b, {v18.16b},v1.16b	// vpshufb %xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {v18.16b},v9.16b
	eor	v3.16b, v3.16b, v5.16b		// vpxor %xmm5, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b, v4.16b, v5.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b, {v18.16b},v3.16b	// vpshufb %xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {v18.16b},v11.16b
	tbl	v3.16b, {v18.16b},v4.16b	// vpshufb %xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {v18.16b},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa -0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa -0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {v22.16b}, v10.16b
	ld1	{v1.2d}, [x10]			// vmovdqa 0x40(%r11,%r10), %xmm1	# .Lk_sr[]
	tbl	v0.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm0, %xmm0	# 0 = sb1t
	tbl	v8.16b, {v23.16b}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0	# 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v0.16b, {v0.16b},v1.16b		// vpshufb %xmm1, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v1.16b
	ret
.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x

.type	_vpaes_decrypt_preheat,%function
.align	4
_vpaes_decrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	adr	x11, .Lk_dipt
	ld1	{v18.2d,v19.2d}, [x10],#32		// .Lk_inv
	ld1	{v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
	ld1	{v28.2d,v29.2d,v30.2d,v31.2d}, [x11]	// .Lk_dsbb, .Lk_dsbe
	ret
.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

##
## Decryption core
##
## Same API as encryption core.
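## The only real difference on entry is that the pointer into .Lk_sr is
## derived from the round count up front, roughly
##     x11 = .Lk_sr + (((rounds << 4) ^ 0x30) & 0x30)
## which selects the output permutation applied in the last round.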
##
.type	_vpaes_decrypt_core,%function
.align	4
_vpaes_decrypt_core:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds

						// vmovdqa .Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov %rax, %r11; shl $4, %r11
	eor	x11, x11, #0x30			// xor $0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30			// and $0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm4		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb $4, %xmm0, %xmm0
	tbl	v2.16b, {v20.16b}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
	ld1	{v5.2d}, [x10]			// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa .Lk_dipt+16(%rip), %xmm1	# ipthi
	tbl	v0.16b, {v21.16b}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v2.16b, v2.16b, v16.16b		// vpxor %xmm4, %xmm2, %xmm2
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	b	.Ldec_entry

.align 4
.Ldec_loop:
//
//  Inverse mix columns
//
						// vmovdqa -0x20(%r10),%xmm4		# 4 : sb9u
						// vmovdqa -0x10(%r10),%xmm1		# 0 : sb9t
	tbl	v4.16b, {v24.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sb9u
	tbl	v1.16b, {v25.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b		// vpxor %xmm4, %xmm0, %xmm0
						// vmovdqa 0x00(%r10),%xmm4		# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
						// vmovdqa 0x10(%r10),%xmm1		# 0 : sbdt

	tbl	v4.16b, {v26.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v1.16b, {v27.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
						// vmovdqa 0x20(%r10), %xmm4		# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
						// vmovdqa 0x30(%r10), %xmm1		# 0 : sbbt

	tbl	v4.16b, {v28.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v1.16b, {v29.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
						// vmovdqa 0x40(%r10), %xmm4		# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
						// vmovdqa 0x50(%r10), %xmm1		# 0 : sbet

	tbl	v4.16b, {v30.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v1.16b, {v31.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbet
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr $12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
	sub	w8, w8, #1			// sub $1,%rax			# nr--

.Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0	# 1 = i
	tbl	v2.16b, {v19.16b}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v3.16b, {v18.16b}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_loop

	// middle of last round
						// vmovdqa 0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4	# 4 = sbou
						// vmovdqa 0x70(%r10), %xmm1	# 0 : sbot
	ld1	{v2.2d}, [x11]			// vmovdqa -0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	tbl	v1.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core

.globl	vpaes_decrypt
.type	vpaes_decrypt,%function
.align	4
vpaes_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [x0]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [x1]

	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type	_vpaes_decrypt_2x,%function
.align	4
_vpaes_decrypt_2x:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds

						// vmovdqa .Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov %rax, %r11; shl $4, %r11
	eor	x11, x11, #0x30			// xor $0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30			// and $0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm4		# round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb $4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v2.16b, {v20.16b},v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
	tbl	v10.16b, {v20.16b},v9.16b
	ld1	{v5.2d}, [x10]			// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa .Lk_dipt+16(%rip), %xmm1	# ipthi
	tbl	v0.16b, {v21.16b},v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	tbl	v8.16b, {v21.16b},v8.16b
	eor	v2.16b, v2.16b, v16.16b		// vpxor %xmm4, %xmm2, %xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Ldec_2x_entry

.align 4
.Ldec_2x_loop:
//
//  Inverse mix columns
//
						// vmovdqa -0x20(%r10),%xmm4		# 4 : sb9u
						// vmovdqa -0x10(%r10),%xmm1		# 0 : sb9t
	tbl	v4.16b, {v24.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sb9u
	tbl	v12.16b, {v24.16b}, v10.16b
	tbl	v1.16b, {v25.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sb9t
	tbl	v9.16b, {v25.16b}, v11.16b
	eor	v0.16b, v4.16b, v16.16b		// vpxor %xmm4, %xmm0, %xmm0
	eor	v8.16b, v12.16b, v16.16b
						// vmovdqa 0x00(%r10),%xmm4		# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
	eor	v8.16b, v8.16b, v9.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
						// vmovdqa 0x10(%r10),%xmm1		# 0 : sbdt

	tbl	v4.16b, {v26.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbdu
	tbl	v12.16b, {v26.16b}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {v27.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbdt
	tbl	v9.16b, {v27.16b}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
						// vmovdqa 0x20(%r10), %xmm4		# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
						// vmovdqa 0x30(%r10), %xmm1		# 0 : sbbt

	tbl	v4.16b, {v28.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbbu
	tbl	v12.16b, {v28.16b}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {v29.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbbt
	tbl	v9.16b, {v29.16b}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
						// vmovdqa 0x40(%r10), %xmm4		# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
						// vmovdqa 0x50(%r10), %xmm1		# 0 : sbet

	tbl	v4.16b, {v30.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4		# 4 = sbeu
	tbl	v12.16b, {v30.16b}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0		# MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {v31.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1		# 0 = sbet
	tbl	v9.16b, {v31.16b}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0		# 4 = ch
	eor	v8.16b, v8.16b, v12.16b
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr $12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0		# 0 = ch
	eor	v8.16b, v8.16b, v9.16b
	sub	w8, w8, #1			// sub $1,%rax			# nr--

.Ldec_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0	# 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v2.16b, {v19.16b},v1.16b	// vpshufb %xmm1, %xmm11, %xmm2	# 2 = a/k
	tbl	v10.16b, {v19.16b},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1	# 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {v18.16b},v0.16b	// vpshufb %xmm0, %xmm10, %xmm3	# 3 = 1/i
	tbl	v11.16b, {v18.16b},v8.16b
	tbl	v4.16b, {v18.16b},v1.16b	// vpshufb %xmm1, %xmm10, %xmm4	# 4 = 1/j
	tbl	v12.16b, {v18.16b},v9.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b, {v18.16b},v3.16b	// vpshufb %xmm3, %xmm10, %xmm2	# 2 = 1/iak
	tbl	v10.16b, {v18.16b},v11.16b
	tbl	v3.16b, {v18.16b},v4.16b	// vpshufb %xmm4, %xmm10, %xmm3	# 3 = 1/jak
	tbl	v11.16b, {v18.16b},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_2x_loop

	// middle of last round
						// vmovdqa 0x60(%r10), %xmm4	# 3 : sbou
	tbl	v4.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4	# 4 = sbou
	tbl	v12.16b, {v22.16b}, v10.16b
						// vmovdqa 0x70(%r10), %xmm1	# 0 : sbot
	tbl	v1.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1	# 0 = sb1t
	tbl	v9.16b, {v23.16b}, v11.16b
	ld1	{v2.2d}, [x11]			// vmovdqa -0x160(%r11), %xmm2	# .Lk_sr-.Lk_dsbd=-0x160
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm0, %xmm4, %xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm0	# 0 = A
	eor	v8.16b, v9.16b, v12.16b
	tbl	v0.16b, {v0.16b},v2.16b		// vpshufb %xmm2, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v2.16b
	ret
.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adr	x10, .Lk_inv
	movi	v16.16b, #0x5b			// .Lk_s63
	adr	x11, .Lk_sb1
	movi	v17.16b, #0x0f			// .Lk_s0F
	ld1	{v18.2d,v19.2d,v20.2d,v21.2d}, [x10]	// .Lk_inv, .Lk_ipt
	adr	x10, .Lk_dksd
	ld1	{v22.2d,v23.2d}, [x11]		// .Lk_sb1
	adr	x11, .Lk_mc_forward
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
	ld1	{v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
	ld1	{v8.2d}, [x10]			// .Lk_rcon
	ld1	{v9.2d}, [x11]			// .Lk_mc_forward[0]
	ret
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat		// load the tables

	ld1	{v0.16b}, [x0],#16		// vmovdqu (%rdi), %xmm0	# load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b			// vmovdqa %xmm0, %xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b			// vmovdqa %xmm0, %xmm7

	adr	x10, .Lk_sr			// lea .Lk_sr(%rip),%r10
	add	x8, x8, x10
	cbnz	w3, .Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [x2]			// vmovdqu %xmm0, (%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	st1	{v3.2d}, [x2]			// vmovdqu %xmm3, (%rdx)
	eor	x8, x8, #0x30			// xor $0x30, %r8

.Lschedule_go:
	cmp	w1, #192			// cmp $192, %esi
	b.hi	.Lschedule_256
	b.eq	.Lschedule_192
	// 128: fall through

##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
.Lschedule_128:
	mov	x0, #10				// mov $10, %esi

.Loop_schedule_128:
	sub	x0, x0, #1			// dec %esi
	bl	_vpaes_schedule_round
	cbz	x0, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// write output
	b	.Loop_schedule_128

##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
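##
## As a concrete count: a 192-bit key stores rounds = 192/32+5 = 11 in
## AES_KEY->rounds, so 11+2 = 13 round keys are needed.  The zeroth key
## is written before .Lschedule_go, the loop below makes four passes
## writing 3 keys on each of the first three and 2 on the last, and
## .Lschedule_mangle_last writes the final one: 1 + 3*3 + 2 + 1 = 13.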
##
.align	4
.Lschedule_192:
	sub	x0, x0, #8
	ld1	{v0.16b}, [x0]			// vmovdqu 8(%rdi),%xmm0	# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b			// vmovdqa %xmm0, %xmm6		# save short part
	eor	v4.16b, v4.16b, v4.16b		// vpxor %xmm4, %xmm4, %xmm4	# clear 4
	ins	v6.d[0], v4.d[0]		// vmovhlps %xmm4, %xmm6, %xmm6	# clobber low side with zeros
	mov	x0, #4				// mov $4, %esi

.Loop_schedule_192:
	sub	x0, x0, #1			// dec %esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr $8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		// save key n+1
	bl	_vpaes_schedule_round
	cbz	x0, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192

##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align	4
.Lschedule_256:
	ld1	{v0.16b}, [x0]			// vmovdqu 16(%rdi),%xmm0	# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	x0, #7				// mov $7, %esi

.Loop_schedule_256:
	sub	x0, x0, #1			// dec %esi
	bl	_vpaes_schedule_mangle		// output low result
	mov	v6.16b, v0.16b			// vmovdqa %xmm0, %xmm6		# save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	x0, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]			// vpshufd $0xFF, %xmm0, %xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b			// vmovdqa %xmm7, %xmm5
	mov	v7.16b, v6.16b			// vmovdqa %xmm6, %xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b			// vmovdqa %xmm5, %xmm7

	b	.Loop_schedule_256

##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
##   when encrypting, outputs out(%xmm0) ^ 63
##   when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align	4
.Lschedule_mangle_last:
	// schedule last round key from xmm0
	adr	x11, .Lk_deskew			// lea .Lk_deskew(%rip),%r11	# prepare to deskew
	cbnz	w3, .Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10),%xmm1
	adr	x11, .Lk_opt			// lea .Lk_opt(%rip), %r11	# prepare to output transform
	add	x2, x2, #32			// add $32, %rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0	# output permute

.Lschedule_mangle_last_dec:
	ld1	{v20.2d,v21.2d}, [x11]		// reload constants
	sub	x2, x2, #16			// add $-16, %rdx
	eor	v0.16b, v0.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm0, %xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [x2]			// vmovdqu %xmm0, (%rdx)	# save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b		// vpxor %xmm0, %xmm0, %xmm0
	eor	v1.16b, v1.16b, v1.16b		// vpxor %xmm1, %xmm1, %xmm1
	eor	v2.16b, v2.16b, v2.16b		// vpxor %xmm2, %xmm2, %xmm2
	eor	v3.16b, v3.16b, v3.16b		// vpxor %xmm3, %xmm3, %xmm3
	eor	v4.16b, v4.16b, v4.16b		// vpxor %xmm4, %xmm4, %xmm4
	eor	v5.16b, v5.16b, v5.16b		// vpxor %xmm5, %xmm5, %xmm5
	eor	v6.16b, v6.16b, v6.16b		// vpxor %xmm6, %xmm6, %xmm6
	eor	v7.16b, v7.16b, v7.16b		// vpxor %xmm7, %xmm7, %xmm7
	ldp	x29, x30, [sp],#16
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
##   %xmm7: high side, b a x y
##   %xmm6: low side, d c 0 0
##   %xmm13: 0
##
## Outputs:
##   %xmm6: b+c+d b+c 0 0
##   %xmm0: b+c+d b+c b a
##
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]		// vpshufd $0x80, %xmm6, %xmm1	# d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]		// vpshufd $0xFE, %xmm7, %xmm0	# b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b		// vpxor %xmm1, %xmm6, %xmm6	# -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b		// vpxor %xmm1, %xmm1, %xmm1
	eor	v6.16b, v6.16b, v0.16b		// vpxor %xmm0, %xmm6, %xmm6	# -> b+c+d b+c b a
	mov	v0.16b, v6.16b			// vmovdqa %xmm6, %xmm0
	ins	v6.d[0], v1.d[0]		// vmovhlps %xmm1, %xmm6, %xmm6	# clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0			// vpxor %xmm4, %xmm4, %xmm4
	ext	v1.16b, v8.16b, v4.16b, #15	// vpalignr $15, %xmm8, %xmm4, %xmm1
	ext	v8.16b, v8.16b, v8.16b, #15	// vpalignr $15, %xmm8, %xmm8, %xmm8
	eor	v7.16b, v7.16b, v1.16b		// vpxor %xmm1, %xmm7, %xmm7

	// rotate
	dup	v0.4s, v0.s[3]			// vpshufd $0xFF, %xmm0, %xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr $1, %xmm0, %xmm0, %xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
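	// From here on, both entry points share the same body: xmm7 is
	// smeared with two slide-and-xor steps, subbytes runs on xmm0 via
	// the .Lk_inv tables and .Lk_sb1, the (basis-transformed) S-box
	// constant .Lk_s63 (v16) is xored into the smeared key, and the
	// result is left in both xmm0 and xmm7.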
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq $4, %xmm7, %xmm1
	eor	v7.16b, v7.16b, v1.16b		// vpxor %xmm1, %xmm7, %xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq $8, %xmm7, %xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0	# 1 = i
	eor	v7.16b, v7.16b, v4.16b		// vpxor %xmm4, %xmm7, %xmm7
	tbl	v2.16b, {v19.16b}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1	# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3	# 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3	# 3 = iak = 1/i + a/k
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4	# 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm7, %xmm7
	tbl	v3.16b, {v18.16b}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm3	# 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm2	# 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b		// vpxor %xmm1, %xmm3, %xmm3	# 2 = io
	eor	v2.16b, v2.16b, v0.16b		// vpxor %xmm0, %xmm2, %xmm2	# 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm13, %xmm4	# 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm12, %xmm1	# 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm1	# 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b		// vpxor %xmm7, %xmm1, %xmm0
	eor	v7.16b, v1.16b, v7.16b		// vmovdqa %xmm0, %xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v0.16b, #4		// vpsrlb $4, %xmm0, %xmm0
						// vmovdqa (%r11), %xmm2	# lo
	tbl	v2.16b, {v20.16b}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
						// vmovdqa 16(%r11), %xmm1	# hi
	tbl	v0.16b, {v21.16b}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
##   xor with 0x63
##   multiply by circulant 0,1,1,1
##   apply shiftrows transform
##
## On decrypt,
##   xor with 0x63
##   multiply by "inverse mixcolumns" circulant E,B,D,9
##   deskew
##   apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b			// vmovdqa %xmm0, %xmm4	# save xmm0 for later
						// vmovdqa .Lk_mc_forward(%rip),%xmm5
	cbnz	w3, .Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm0, %xmm4
	add	x2, x2, #16			// add $16, %rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb %xmm5, %xmm1, %xmm3
	eor	v4.16b, v4.16b, v1.16b		// vpxor %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	eor	v3.16b, v3.16b, v4.16b		// vpxor %xmm4, %xmm3, %xmm3

	b	.Lschedule_mangle_both
.align	4
.Lschedule_mangle_dec:
	// inverse mix columns
						// lea .Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4		// vpsrlb $4, %xmm4, %xmm1	# 1 = hi
	and	v4.16b, v4.16b, v17.16b		// vpand %xmm9, %xmm4, %xmm4	# 4 = lo

						// vmovdqa 0x00(%r11), %xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
						// vmovdqa 0x10(%r11), %xmm3
	tbl	v3.16b, {v25.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

						// vmovdqa 0x20(%r11), %xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
						// vmovdqa 0x30(%r11), %xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

						// vmovdqa 0x40(%r11), %xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
						// vmovdqa 0x50(%r11), %xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3

						// vmovdqa 0x60(%r11), %xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3
						// vmovdqa 0x70(%r11), %xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
	eor	v3.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm3

	sub	x2, x2, #16			// add $-16, %rdx

.Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	add	x8, x8, #64-16			// add $-16, %r8
	and	x8, x8, #~(1<<6)		// and $0x30, %r8
	st1	{v3.2d}, [x2]			// vmovdqu %xmm3, (%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so

	lsr	w9, w1, #5			// shr $5,%eax
	add	w9, w9, #5			// $5,%eax
	str	w9, [x2,#240]			// mov %eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;

	mov	w3, #0				// mov $0,%ecx
	mov	x8, #0x30			// mov $0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so

	lsr	w9, w1, #5			// shr $5,%eax
	add	w9, w9, #5			// $5,%eax
	str	w9, [x2,#240]			// mov %eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4			// shl $4,%eax
	add	x2, x2, #16			// lea 16(%rdx,%rax),%rdx
	add	x2, x2, x9

	mov	w3, #1				// mov $1,%ecx
	lsr	w8, w1, #1			// shr $1,%r8d
	and	x8, x8, #32			// and $32,%r8d
	eor	x8, x8, #32			// xor $32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
.globl	vpaes_cbc_encrypt
.type	vpaes_cbc_encrypt,%function
.align	4
vpaes_cbc_encrypt:
	cbz	x2, .Lcbc_abort
	cmp	w5, #0				// check direction
	b.eq	vpaes_cbc_decrypt

	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, x2				// reassign
	mov	x2, x3				// reassign

	ld1	{v0.16b}, [x4]			// load ivec
	bl	_vpaes_encrypt_preheat
	b	.Lcbc_enc_loop

.align 4
.Lcbc_enc_loop:
	ld1	{v7.16b}, [x0],#16		// load input
	eor	v7.16b, v7.16b, v0.16b		// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [x1],#16		// save output
	subs	x17, x17, #16
	b.hi	.Lcbc_enc_loop

	st1	{v0.16b}, [x4]			// write ivec

	ldp	x29,x30,[sp],#16
.Lcbc_abort:
	ret
.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt

.type	vpaes_cbc_decrypt,%function
.align	4
vpaes_cbc_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, x2				// reassign
	mov	x2, x3				// reassign
	ld1	{v6.16b}, [x4]			// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lcbc_dec_loop2x

	ld1	{v7.16b}, [x0], #16		// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b		// xor with ivec
	orr	v6.16b, v7.16b, v7.16b		// next ivec value
	st1	{v0.16b}, [x1], #16
	subs	x17, x17, #16
	b.ls	.Lcbc_dec_done

.align 4
.Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [x0], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b		// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [x1], #32
	subs	x17, x17, #32
	b.hi	.Lcbc_dec_loop2x

.Lcbc_dec_done:
	st1	{v6.16b}, [x4]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
.globl	vpaes_ecb_encrypt
.type	vpaes_ecb_encrypt,%function
.align	4
vpaes_ecb_encrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, x2
	mov	x2, x3
	bl	_vpaes_encrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_enc_loop

	ld1	{v7.16b}, [x0],#16
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [x1],#16
	subs	x17, x17, #16
	b.ls	.Lecb_enc_done

.align 4
.Lecb_enc_loop:
	ld1	{v14.16b,v15.16b}, [x0], #32
	bl	_vpaes_encrypt_2x
	st1	{v0.16b,v1.16b}, [x1], #32
	subs	x17, x17, #32
	b.hi	.Lecb_enc_loop

.Lecb_enc_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl	vpaes_ecb_decrypt
.type	vpaes_ecb_decrypt,%function
.align	4
vpaes_ecb_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, x2
	mov	x2, x3
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_dec_loop

	ld1	{v7.16b}, [x0],#16
	bl	_vpaes_decrypt_core		// odd leftover block goes through the decrypt core
	st1	{v0.16b}, [x1],#16
	subs	x17, x17, #16
	b.ls	.Lecb_dec_done

.align 4
.Lecb_dec_loop:
	ld1	{v14.16b,v15.16b}, [x0], #32
	bl	_vpaes_decrypt_2x
	st1	{v0.16b,v1.16b}, [x1], #32
	subs	x17, x17, #32
	b.hi	.Lecb_dec_loop

.Lecb_dec_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
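
// The entry points above follow the usual OpenSSL-style AES_KEY calling
// convention.  Judging from the register usage (x0..x5), the C-level
// declarations these routines are expected to back look roughly like the
// sketch below; the authoritative prototypes live in the C glue that
// accompanies this file, not here.
//
//	int  vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
//	                           AES_KEY *key);
//	int  vpaes_set_decrypt_key(const unsigned char *userKey, int bits,
//	                           AES_KEY *key);
//	void vpaes_encrypt(const unsigned char *in, unsigned char *out,
//	                   const AES_KEY *key);
//	void vpaes_decrypt(const unsigned char *in, unsigned char *out,
//	                   const AES_KEY *key);
//	void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
//	                       size_t length, const AES_KEY *key,
//	                       unsigned char *ivec, int enc);
//	void vpaes_ecb_encrypt(const unsigned char *in, unsigned char *out,
//	                       size_t length, const AES_KEY *key);
//	void vpaes_ecb_decrypt(const unsigned char *in, unsigned char *out,
//	                       size_t length, const AES_KEY *key);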