Lines Matching refs:xmov

141 #define	xmov	movaps						// aligned 16-byte move
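
The alias pins xmov to movaps, the aligned 16-byte move: every buffer it touches below (the WK slots and the xmm spill area from 4*16(sp) up) lives in 16-byte-aligned stack memory. A rough C analog of the aligned/unaligned distinction, using SSE2 intrinsics (illustrative only, not taken from the source):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        _Alignas(16) uint32_t w[4] = {1, 2, 3, 4};
        /* movaps analog: operand must be 16-byte aligned */
        __m128i v = _mm_load_si128((const __m128i *)w);
        /* movups analog: no alignment requirement */
        _mm_storeu_si128((__m128i *)w, v);
        printf("%u %u %u %u\n", (unsigned)w[0], (unsigned)w[1],
               (unsigned)w[2], (unsigned)w[3]);
        return 0;
    }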
215 xmov W_TMP, $0 // save W_TMP in the circular buffer
228 xmov W_TMP, WK($0&~3) // save the quadruple W[i]+K in stack memory; it will be used later to update the hashes A/B/C/D/E
268 xmov 14*16(sp), W_TMP // load the byte-swapped 16 bytes from the aligned stack memory
269 xmov W_TMP, $0 // save W = W_TMP in the circular buffer
291 xmov $1, $4 // W = W12
293 xmov $3, W_TMP // W_TMP = W4
301 xmov $1, W_TMP2 // W_TMP2 = W3 ^ W16 ^ W8 ^ W14
302 xmov $1, W_TMP // W_TMP = W3 ^ W16 ^ W8 ^ W14
310 xmov W_TMP2, $0 // copy W[i] to the location of W[i+3]
321 xmov W_TMP, $0 // save W = W_TMP in the W circular buffer
327 xmov W_TMP, WK($1&~3) // save WK = W+K for later update of the hashes A/B/C/D/E
333 xmov $1, $4 // W = W12 = (w9 w10 w11 w12)
336 xmov $0, W_TMP // W_TMP = W16 = (w13 w14 w15 w16)
341 xmov $3, W_TMP // W_TMP = W4 = (w1 w2 w3 w4)
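
Through round 31 these moves implement the standard SHA-1 message schedule: the last 16 W words live in a four-xmm circular buffer, and each pass produces four new words plus their W+K values. A scalar C reference for the recurrence the vectors compute (function name is mine, for illustration):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n) {
        return (x << n) | (x >> (32 - n));
    }

    /* Rounds 16..31: W[i] = (W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]) rol 1.
     * The assembly computes four of these per pass, then stores W[i]+K
     * into the WK stack slots for the later hash-update rounds. */
    void sha1_schedule_16_31(uint32_t w[32]) {
        for (int i = 16; i < 32; i++)
            w[i] = rol32(w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16], 1);
    }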
362 xmov $2, W_TMP // W_TMP = (w1 w2 w3 w4)
369 xmov $2, W_TMP // W_TMP = (w1 w2 w3 w4)
370 xmov $1, W_TMP2 // W_TMP2 = (w5 w6 w7 w8)
379 xmov $3, W_TMP // W32
381 xmov W_TMP, $3 // W = W28 ^ W32
382 xmov $2, W_TMP // W4
388 xmov $3, W_TMP // W32
390 xmov W_TMP, $3 // W = W28 ^ W32
391 xmov $2, W_TMP // W4 = (w1 w2 w3 w4)
392 xmov $1, W_TMP2 // W8 = (w5 w6 w7 w8)
401 xmov W_TMP, $1 // W = W_TMP = W6 ^ W16 ^ W28 ^ W32
413 xmov $0, W_TMP2 // W_TMP2 = W
415 xmov W_TMP2, $0 // save (W >> 30) at W
422 xmov W_TMP, $0 // W = (W6 ^ W16 ^ W28 ^ W32) rol 2
424 xmov W_TMP, WK($1&~3) // write W+K
427 xmov W_TMP, $0 // W = (W6 ^ W16 ^ W28 ^ W32) rol 2
429 xmov W_TMP, WK($1&~3) // write WK
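
From round 32 on, the code switches to the equivalent recurrence the comments above spell out: W[i] = (W[i-6] ^ W[i-16] ^ W[i-28] ^ W[i-32]) rol 2. Eliminating the W[i-3] term removes the intra-vector dependency, so all four lanes can be XORed at once; and since SSE has no vector rotate, the rol 2 is built from (W << 2) | (W >> 30), which is why a copy of W and the saved (W >> 30) both appear above. A scalar sketch of the same identity:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned n) {
        return (x << n) | (x >> (32 - n));
    }

    /* Rounds 32..79: unrolling the i-3 recurrence gives
     * W[i] = (W[i-6] ^ W[i-16] ^ W[i-28] ^ W[i-32]) rol 2, whose
     * closest reference is i-6, so four adjacent W values never
     * depend on one another. */
    void sha1_schedule_32_79(uint32_t w[80]) {
        for (int i = 32; i < 80; i++)
            w[i] = rol32(w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32], 2);
    }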
1294 xmov %xmm0, 4*16(sp)
1295 xmov %xmm1, 5*16(sp)
1296 xmov %xmm2, 6*16(sp)
1297 xmov %xmm3, 7*16(sp)
1298 xmov %xmm4, 8*16(sp)
1299 xmov %xmm5, 9*16(sp)
1300 xmov %xmm6, 10*16(sp)
1301 xmov %xmm7, 11*16(sp)
1303 xmov %xmm8, 12*16(sp)
1304 xmov %xmm9, 13*16(sp)
1305 xmov %xmm10, 14*16(sp)
1318 xmov 0x40(K_BASE), XMM_SHUFB_BSWAP
1331 xmov 0x40(%eax), %xmm0
1332 xmov %xmm0, XMM_SHUFB_BSWAP
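
XMM_SHUFB_BSWAP is the pshufb control mask stored 0x40 bytes past the K constants (reached through K_BASE on x86_64, through %eax on i386). SHA-1 consumes its message words big-endian, so each 4-byte lane of an input block is byte-reversed right after loading. A C analog using the SSSE3 intrinsic; the mask value is the standard per-lane byte reversal, assumed here rather than read out of the source:

    #include <tmmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        /* per-32-bit-lane byte reversal mask for pshufb */
        const __m128i bswap_mask = _mm_set_epi8(12, 13, 14, 15,
                                                 8,  9, 10, 11,
                                                 4,  5,  6,  7,
                                                 0,  1,  2,  3);
        uint8_t msg[16] = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
        __m128i v = _mm_loadu_si128((const __m128i *)msg);
        v = _mm_shuffle_epi8(v, bswap_mask);   /* pshufb */
        uint32_t w[4];
        _mm_storeu_si128((__m128i *)w, v);
        printf("%08x %08x %08x %08x\n", (unsigned)w[0], (unsigned)w[1],
               (unsigned)w[2], (unsigned)w[3]);
        return 0;
    }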
1340 xmov 4*16(sp), %xmm0
1341 xmov 5*16(sp), %xmm1
1342 xmov 6*16(sp), %xmm2
1343 xmov 7*16(sp), %xmm3
1344 xmov 8*16(sp), %xmm4
1345 xmov 9*16(sp), %xmm5
1346 xmov 10*16(sp), %xmm6
1347 xmov 11*16(sp), %xmm7
1349 xmov 12*16(sp), %xmm8
1350 xmov 13*16(sp), %xmm9
1351 xmov 14*16(sp), %xmm10
1391 xmov %xmm0, 4*16(sp)
1392 xmov %xmm1, 5*16(sp)
1393 xmov %xmm2, 6*16(sp)
1394 xmov %xmm3, 7*16(sp)
1395 xmov %xmm4, 8*16(sp)
1396 xmov %xmm5, 9*16(sp)
1397 xmov %xmm6, 10*16(sp)
1398 xmov %xmm7, 11*16(sp)
1400 xmov %xmm8, 12*16(sp)
1401 xmov %xmm9, 13*16(sp)
1433 xmov 4*16(sp), %xmm0
1434 xmov 5*16(sp), %xmm1
1435 xmov 6*16(sp), %xmm2
1436 xmov 7*16(sp), %xmm3
1437 xmov 8*16(sp), %xmm4
1438 xmov 9*16(sp), %xmm5
1439 xmov 10*16(sp), %xmm6
1440 xmov 11*16(sp), %xmm7
1442 xmov 12*16(sp), %xmm8
1443 xmov 13*16(sp), %xmm9
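
Note that the reloads at 1340-1351 and 1433-1443 mirror the spills at 1294-1305 and 1391-1401 register for register and slot for slot; the save area starts at 4*16(sp) and steps by 16 bytes, so every one of these accesses stays aligned and xmov can remain movaps throughout.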