;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2013 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;*          Henrik Gramner <henrik@gramner.com>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .

%ifndef private_prefix
    %define private_prefix x264
%endif

%ifndef public_prefix
    %define public_prefix private_prefix
%endif

%define WIN64  0
%define UNIX64 0
%if ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,win64
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,x64
        %define WIN64  1
    %else
        %define UNIX64 1
    %endif
%endif

%ifdef PREFIX
    %define mangle(x) _ %+ x
%else
    %define mangle(x) x
%endif

; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        section .text
    %else
        SECTION .rodata align=%1
    %endif
%endmacro

%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %else
        SECTION .text align=%1
    %endif
%endmacro

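; Example (a sketch): SECTION_RODATA is the portable way to start an aligned
; read-only data section. The table name and contents below are illustrative.
; SECTION_RODATA 32
; pw_ones: times 16 dw 1
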
%if WIN64
    %define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
    %undef PIC
%endif
%ifdef PIC
    default rel
%endif

%macro CPUNOP 1
    %if HAVE_CPUNOP
        CPU %1
    %endif
%endmacro

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
CPUNOP amdnop

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size

%macro DECLARE_REG 2-3
    %define r%1q %2
    %define r%1d %2d
    %define r%1w %2w
    %define r%1b %2b
    %define r%1h %2h
    %define %2q %2
    %if %0 == 2
        %define r%1m  %2d
        %define r%1mp %2
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
    %else
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m
    %endif
    %define r%1 %2
%endmacro

%macro DECLARE_REG_SIZE 3
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1h %3
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
%if ARCH_X86_64 == 0
    %define r%1 e%1
%endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14

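; Example (a sketch): DECLARE_REG_TMP maps t0, t1, ... onto argument registers
; so that a function body can stay arch-independent. The register numbers used
; here are illustrative.
; %if ARCH_X86_64
;     DECLARE_REG_TMP 4, 5
; %else
;     DECLARE_REG_TMP 1, 2
; %endif
; mov t0d, t1d
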
%if ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset+gprsize
    %endif
%endmacro

%macro POP 1
    pop %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset-gprsize
    %endif
%endmacro

%macro PUSH_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            PUSH r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro POP_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            pop r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro LOAD_IF_USED 1-*
    %rep %0
        %if %1 < num_args
            mov r%1, r %+ %1 %+ mp
        %endif
        %rotate 1
    %endrep
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assert failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
%endmacro

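; Example (a sketch): DEFINE_ARGS can be re-invoked mid-function to give the
; same argument registers new names once the old ones are no longer needed.
; All names here are illustrative.
; DEFINE_ARGS dst, src, len
; ...
; DEFINE_ARGS buf, stride, count  ; r0/r1/r2 are now bufq/strideq/countq
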
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %ifnum %1
        %if %1 != 0
            %assign %%stack_alignment ((mmsize + 15) & ~15)
            %assign stack_size %1
            %if stack_size < 0
                %assign stack_size -stack_size
            %endif
            %assign stack_size_padded stack_size
            %if WIN64
                %assign stack_size_padded stack_size_padded + 32 ; reserve 32 bytes for shadow space
                %if mmsize != 8
                    %assign xmm_regs_used %2
                    %if xmm_regs_used > 8
                        %assign stack_size_padded stack_size_padded + (xmm_regs_used-8)*16
                    %endif
                %endif
            %endif
            %if mmsize <= 16 && HAVE_ALIGNED_STACK
                %assign stack_size_padded stack_size_padded + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
                SUB rsp, stack_size_padded
            %else
                %assign %%reg_num (regs_used - 1)
                %xdefine rstk r %+ %%reg_num
                ; align stack, and save original stack location directly above
                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
                ; stack in a single instruction (i.e. mov rsp, rstk or mov
                ; rsp, [rsp+stack_size_padded])
                mov rstk, rsp
                %if %1 < 0 ; need to store rsp on stack
                    sub rsp, gprsize+stack_size_padded
                    and rsp, ~(%%stack_alignment-1)
                    %xdefine rstkm [rsp+stack_size_padded]
                    mov rstkm, rstk
                %else ; can keep rsp in rstk during whole function
                    sub rsp, stack_size_padded
                    and rsp, ~(%%stack_alignment-1)
                    %xdefine rstkm rstk
                %endif
            %endif
            WIN64_PUSH_XMM
        %endif
    %endif
%endmacro

%macro SETUP_STACK_POINTER 1
    %ifnum %1
        %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
            %if %1 > 0
                %assign regs_used (regs_used + 1)
            %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
                %warning "Stack pointer will overwrite register argument"
            %endif
        %endif
    %endif
%endmacro

%macro DEFINE_ARGS_INTERNAL 3+
    %ifnum %2
        DEFINE_ARGS %3
    %elif %1 == 4
        DEFINE_ARGS %2
    %elif %1 > 4
        DEFINE_ARGS %2, %3
    %endif
%endmacro

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0,  rcx
DECLARE_REG 1,  rdx
DECLARE_REG 2,  R8
DECLARE_REG 3,  R9
DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4, %3
    %if mmsize != 8 && stack_size == 0
        WIN64_SPILL_XMM %3
    %endif
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

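; Example (a sketch): a declaration exercising the stack/XMM handling above:
; 4 args, 6 GPRs, 10 XMM registers and 32 bytes of aligned scratch space.
; The function and argument names are illustrative.
; cglobal example_func, 4, 6, 10, 32, dst, src, stride, h, tmp, x
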
%macro WIN64_PUSH_XMM 0
    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
    %if xmm_regs_used > 6
        movaps [rstk + stack_offset + 8], xmm6
    %endif
    %if xmm_regs_used > 7
        movaps [rstk + stack_offset + 24], xmm7
    %endif
    %if xmm_regs_used > 8
        %assign %%i 8
        %rep xmm_regs_used-8
            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 8
        %assign stack_size_padded (xmm_regs_used-8)*16 + (~stack_offset&8) + 32
        SUB rsp, stack_size_padded
    %endif
    WIN64_PUSH_XMM
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 1
    %assign %%pad_size 0
    %if xmm_regs_used > 8
        %assign %%i xmm_regs_used
        %rep xmm_regs_used-8
            %assign %%i %%i-1
            movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
        %endrep
    %endif
    %if stack_size_padded > 0
        %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
            mov rsp, rstkm
        %else
            add %1, stack_size_padded
            %assign %%pad_size stack_size_padded
        %endif
    %endif
    %if xmm_regs_used > 7
        movaps xmm7, [%1 + stack_offset - %%pad_size + 24]
    %endif
    %if xmm_regs_used > 6
        movaps xmm6, [%1 + stack_offset - %%pad_size + 8]
    %endif
%endmacro

%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign xmm_regs_used 0
%endmacro

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%if mmsize == 32
    vzeroupper
%endif
    AUTO_REP_RET
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0,  rdi
DECLARE_REG 1,  rsi
DECLARE_REG 2,  rdx
DECLARE_REG 3,  rcx
DECLARE_REG 4,  R8
DECLARE_REG 5,  R9
DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

%macro RET 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
    mov rsp, rstkm
%else
    add rsp, stack_size_padded
%endif
%endif
    POP_IF_USED 14, 13, 12, 11, 10, 9
%if mmsize == 32
    vzeroupper
%endif
    AUTO_REP_RET
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp

%macro DECLARE_ARG 1-*
    %rep %0
        %define r%1m [rstk + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m
        %rotate 1
    %endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

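; Example (a sketch): on x86_32 only 7 GPRs are usable, so arguments beyond
; those loaded by PROLOGUE are read from their original stack slots via rNm.
; The register and argument numbers here are illustrative.
; mov r1d, r7m  ; reload the 8th argument (index 7) from the stack
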
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    %if num_args > 7
        %assign num_args 7
    %endif
    %if regs_used > 7
        %assign regs_used 7
    %endif
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    ALLOC_STACK %4
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS_INTERNAL %0, %4, %5
%endmacro

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

%macro RET 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
    mov rsp, rstkm
%else
    add rsp, stack_size_padded
%endif
%endif
    POP_IF_USED 6, 5, 4, 3
%if mmsize == 32
    vzeroupper
%endif
    AUTO_REP_RET
%endmacro

%endif ;======================================================================

%if WIN64 == 0
%macro WIN64_SPILL_XMM 1
%endmacro
%macro WIN64_RESTORE_XMM 1
%endmacro
%macro WIN64_PUSH_XMM 0
%endmacro
%endif

; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
%macro REP_RET 0
    %if has_epilogue
        RET
    %else
        rep ret
    %endif
%endmacro

%define last_branch_adr $$
%macro AUTO_REP_RET 0
    %ifndef cpuflags
        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr.
    %elif notcpuflag(ssse3)
        times ((last_branch_adr-$)>>31)+1 rep
    %endif
    ret
%endmacro

%macro BRANCH_INSTR 0-*
    %rep %0
        %macro %1 1-2 %1
            %2 %1
            %%branch_instr:
            %xdefine last_branch_adr %%branch_instr
        %endmacro
        %rotate 1
    %endrep
%endmacro

BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent
    %if has_epilogue
        call %1
        RET
    %elif %2
        jmp %1
    %endif
%endmacro

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
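;
; Example (a sketch): a minimal function declared with cglobal while
; "INIT_XMM sse2" is in effect; the emitted symbol gets an _sse2 suffix and
; m0 refers to xmm0. All names here are illustrative.
; INIT_XMM sse2
; cglobal copy16, 2, 2, 1, dst, src
;     movu  m0, [srcq]
;     movu  [dstq], m0
;     RET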
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%endmacro
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
    %if %1
        %xdefine %%FUNCTION_PREFIX private_prefix
        %xdefine %%VISIBILITY hidden
    %else
        %xdefine %%FUNCTION_PREFIX public_prefix
        %xdefine %%VISIBILITY
    %endif
    %ifndef cglobaled_%2
        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
        %xdefine %2.skip_prologue %2 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %2, 1
    %endif
    %xdefine current_function %2
    %ifidn __OUTPUT_FORMAT__,elf
        global %2:function %%VISIBILITY
    %else
        global %2
    %endif
    align function_align
    %2:
    RESET_MM_PERMUTATION        ; needed for x86-64, also makes disassembly somewhat nicer
    %xdefine rstk rsp           ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
    %assign stack_offset 0      ; stack pointer offset relative to the return address
    %assign stack_size 0        ; amount of stack space that can be freely used inside a function
    %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
    %assign xmm_regs_used 0     ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
    %ifnidn %3, ""
        PROLOGUE %3
    %endif
%endmacro

%macro cextern 1
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

%macro const 1-2+
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:data hidden
    %else
        global %1
    %endif
    %1: %2
%endmacro

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%endif

; cpuflags

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_aligned  (1<<20) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<21)
%assign cpuflags_bmi1     (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2     (1<<23)|cpuflags_bmi1

%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))

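; Example (a sketch): cpuflag()/notcpuflag() can gate alternative code paths
; inside a function body. The instructions and registers are illustrative.
; %if cpuflag(sse4)
;     pminud m0, m1
; %else
;     ; slower SSE2 fallback goes here
; %endif
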
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-2
    CPUNOP amdnop
    %if %0 >= 1
        %xdefine cpuname %1
        %assign cpuflags cpuflags_%1
        %if %0 >= 2
            %xdefine cpuname %1_%2
            %assign cpuflags cpuflags | cpuflags_%2
        %endif
        %xdefine SUFFIX _ %+ cpuname
        %if cpuflag(avx)
            %assign avx_enabled 1
        %endif
        %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
            %define mova movaps
            %define movu movups
            %define movnta movntps
        %endif
        %if cpuflag(aligned)
            %define movu mova
        %elifidn %1, sse3
            %define movu lddqu
        %endif
        %if notcpuflag(sse2)
            CPUNOP basicnop
        %endif
    %else
        %xdefine SUFFIX
        %undef cpuname
        %undef cpuflags
    %endif
%endmacro

; Merge mmx and sse*
; m# is a simd register of the currently selected size
; xm# is the corresponding xmmreg (if selected xmm or ymm size), or mmreg (if selected mmx)
; ym# is the corresponding ymmreg (if selected xmm or ymm size), or mmreg (if selected mmx)
; (All 3 remain in sync through SWAP.)

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define mmsize 8
    %define num_mmregs 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnta movntq
    %assign %%i 0
    %rep 8
        CAT_XDEFINE m, %%i, mm %+ %%i
        CAT_XDEFINE nmm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    %rep 8
        CAT_UNDEF m, %%i
        CAT_UNDEF nmm, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define mmsize 16
    %define num_mmregs 8
    %if ARCH_X86_64
        %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, xmm %+ %%i
        CAT_XDEFINE nxmm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
    INIT_XMM
    %assign avx_enabled 1
    %define PALIGNR PALIGNR_SSSE3
    %define RESET_MM_PERMUTATION INIT_AVX
%endmacro

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define mmsize 32
    %define num_mmregs 8
    %if ARCH_X86_64
        %define num_mmregs 16
    %endif
    %define mova movdqa
    %define movu movdqu
    %undef movh
    %define movnta movntdq
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, ymm %+ %%i
        CAT_XDEFINE nymm, %%i, %%i
        %assign %%i %%i+1
    %endrep
    INIT_CPUFLAGS %1
%endmacro

INIT_XMM

%macro DECLARE_MMCAST 1
    %define mmmm%1 mm%1
    %define mmxmm%1 mm%1
    %define mmymm%1 mm%1
    %define xmmmm%1 mm%1
    %define xmmxmm%1 xmm%1
    %define xmmymm%1 xmm%1
    %define ymmmm%1 mm%1
    %define ymmxmm%1 ymm%1
    %define ymmymm%1 ymm%1
    %define xm%1 xmm %+ m%1
    %define ym%1 ymm %+ m%1
%endmacro

%assign i 0
%rep 16
    DECLARE_MMCAST i
%assign i i+1
%endrep

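; Example (a sketch): the xm#/ym# casts give the narrower or wider view of the
; same register, which is handy for mixed-width AVX2 code. Register numbers
; and instructions here are illustrative.
; INIT_YMM avx2
; mova          m0, [r0]   ; 256-bit load into ymm0
; vextracti128 xm1, m0, 1  ; xm1 is the xmm view of m1
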
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.

%macro PERMUTE 2-* ; takes a list of pairs to swap
%rep %0/2
    %xdefine %%tmp%2 m%2
    %rotate 2
%endrep
%rep %0/2
    %xdefine m%1 %%tmp%2
    CAT_XDEFINE n, m%1, %1
    %rotate 2
%endrep
%endmacro

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
%ifnum %1 ; SWAP 0, 1, ...
    SWAP_INTERNAL_NUM %1, %2
%else ; SWAP m0, m1, ...
    SWAP_INTERNAL_NAME %1, %2
%endif
%endmacro

%macro SWAP_INTERNAL_NUM 2-*
    %rep %0-1
        %xdefine %%tmp m%1
        %xdefine m%1 m%2
        %xdefine m%2 %%tmp
        CAT_XDEFINE n, m%1, %1
        CAT_XDEFINE n, m%2, %2
        %rotate 1
    %endrep
%endmacro

%macro SWAP_INTERNAL_NAME 2-*
    %xdefine %%args n %+ %1
    %rep %0-1
        %xdefine %%args %%args, n %+ %2
        %rotate 1
    %endrep
    SWAP_INTERNAL_NUM %%args
%endmacro

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
    %if %0
        %xdefine %%f %1_m
    %else
        %xdefine %%f current_function %+ _m
    %endif
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
        %assign %%i %%i+1
    %endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    %ifdef %1_m0
        %assign %%i 0
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE n, m %+ %%i, %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    call_internal %1 %+ SUFFIX, %1
%endmacro
%macro call_internal 2
    %xdefine %%i %2
    %ifndef cglobaled_%2
        %ifdef cglobaled_%1
            %xdefine %%i %1
        %endif
    %endif
    call %%i
    LOAD_MM_PERMUTATION %%i
%endmacro

; Substitutions that reduce instruction size but are functionally equivalent
%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 16
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
%assign i i+1
%endrep
%undef i

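; Example (a sketch): with the abstraction below, 3-operand code assembles to
; VEX instructions when AVX is enabled and to a mov+op pair otherwise.
; Register numbers are illustrative and assume INIT_XMM is in effect.
; paddw m0, m1, m2   ; AVX:  vpaddw xmm0, xmm1, xmm2
;                    ; SSE2: movdqa xmm0, xmm1 / paddw xmm0, xmm2
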
%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %xdefine %%dst %2
    %rep %0-2
        %ifidn %%dst, %3
            %error non-avx emulation of ``%%opcode'' is not supported
        %endif
        %rotate 1
    %endrep
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
;%5+: operands
%macro RUN_AVX_INSTR 5-8+
    %ifnum sizeof%6
        %assign __sizeofreg sizeof%6
    %elifnum sizeof%5
        %assign __sizeofreg sizeof%5
    %else
        %assign __sizeofreg mmsize
    %endif
    %assign __emulate_avx 0
    %if avx_enabled && __sizeofreg >= 16
        %xdefine __instr v%1
    %else
        %xdefine __instr %1
        %if %0 >= 7+%3
            %assign __emulate_avx 1
        %endif
    %endif

    %if __emulate_avx
        %xdefine __src1 %6
        %xdefine __src2 %7
        %ifnidn %5, %6
            %if %0 >= 8
                CHECK_AVX_INSTR_EMU {%1 %5, %6, %7, %8}, %5, %7, %8
            %else
                CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
            %endif
            %if %4 && %3 == 0
                %ifnid %7
                    ; 3-operand AVX instructions with a memory arg can only have it in src2,
                    ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
                    ; So, if the instruction is commutative with a memory arg, swap them.
                    %xdefine __src1 %7
                    %xdefine __src2 %6
                %endif
            %endif
            %if __sizeofreg == 8
                MOVQ %5, __src1
            %elif %2
                MOVAPS %5, __src1
            %else
                MOVDQA %5, __src1
            %endif
        %endif
        %if %0 >= 8
            %1 %5, __src2, %8
        %else
            %1 %5, __src2
        %endif
    %elif %0 >= 8
        __instr %5, %6, %7, %8
    %elif %0 == 7
        __instr %5, %6, %7
    %elif %0 == 6
        __instr %5, %6
    %else
        __instr %5
    %endif
%endmacro

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-4 0, 1, 0
    %macro %1 1-9 fnord, fnord, fnord, fnord, %1, %2, %3, %4
        %ifidn %2, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %1
        %elifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro

; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR aesdec, 0, 0, 0
AVX_INSTR aesdeclast, 0, 0, 0
AVX_INSTR aesenc, 0, 0, 0
AVX_INSTR aesenclast, 0, 0, 0
AVX_INSTR aesimc
AVX_INSTR aeskeygenassist
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 1, 0
AVX_INSTR cmpps, 1, 1, 0
AVX_INSTR cmpsd, 1, 1, 0
AVX_INSTR cmpss, 1, 1, 0
AVX_INSTR comisd
AVX_INSTR comiss
AVX_INSTR cvtdq2pd
AVX_INSTR cvtdq2ps
AVX_INSTR cvtpd2dq
AVX_INSTR cvtpd2ps
AVX_INSTR cvtps2dq
AVX_INSTR cvtps2pd
AVX_INSTR cvtsd2si
AVX_INSTR cvtsd2ss
AVX_INSTR cvtsi2sd
AVX_INSTR cvtsi2ss
AVX_INSTR cvtss2sd
AVX_INSTR cvtss2si
AVX_INSTR cvttpd2dq
AVX_INSTR cvttps2dq
AVX_INSTR cvttsd2si
AVX_INSTR cvttss2si
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR extractps
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR insertps, 1, 1, 0
AVX_INSTR lddqu
AVX_INSTR ldmxcsr
AVX_INSTR maskmovdqu
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movapd
AVX_INSTR movaps
AVX_INSTR movd
AVX_INSTR movddup
AVX_INSTR movdqa
AVX_INSTR movdqu
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movhpd, 1, 0, 0
AVX_INSTR movhps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movlpd, 1, 0, 0
AVX_INSTR movlps, 1, 0, 0
AVX_INSTR movmskpd
AVX_INSTR movmskps
AVX_INSTR movntdq
AVX_INSTR movntdqa
AVX_INSTR movntpd
AVX_INSTR movntps
AVX_INSTR movq
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movshdup
AVX_INSTR movsldup
AVX_INSTR movss, 1, 0, 0
AVX_INSTR movupd
AVX_INSTR movups
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR pabsb
AVX_INSTR pabsd
AVX_INSTR pabsw
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pclmulqdq, 0, 1, 0
AVX_INSTR pcmpestri
AVX_INSTR pcmpestrm
AVX_INSTR pcmpistri
AVX_INSTR pcmpistrm
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR pextrb
AVX_INSTR pextrd
AVX_INSTR pextrq
AVX_INSTR pextrw
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phminposuw
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pinsrb, 0, 1, 0
AVX_INSTR pinsrd, 0, 1, 0
AVX_INSTR pinsrq, 0, 1, 0
AVX_INSTR pinsrw, 0, 1, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmovmskb
AVX_INSTR pmovsxbw
AVX_INSTR pmovsxbd
AVX_INSTR pmovsxbq
AVX_INSTR pmovsxwd
AVX_INSTR pmovsxwq
AVX_INSTR pmovsxdq
AVX_INSTR pmovzxbw
AVX_INSTR pmovzxbd
AVX_INSTR pmovzxbq
AVX_INSTR pmovzxwd
AVX_INSTR pmovzxwq
AVX_INSTR pmovzxdq
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR pshufd
AVX_INSTR pshufhw
AVX_INSTR pshuflw
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR ptest
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR rcpps, 1, 0, 0
AVX_INSTR rcpss, 1, 0, 0
AVX_INSTR roundpd
AVX_INSTR roundps
AVX_INSTR roundsd
AVX_INSTR roundss
AVX_INSTR rsqrtps, 1, 0, 0
AVX_INSTR rsqrtss, 1, 0, 0
AVX_INSTR shufpd, 1, 1, 0
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR sqrtpd, 1, 0, 0
AVX_INSTR sqrtps, 1, 0, 0
AVX_INSTR sqrtsd, 1, 0, 0
AVX_INSTR sqrtss, 1, 0, 0
AVX_INSTR stmxcsr
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR ucomisd
AVX_INSTR ucomiss
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
%assign i i+1
%endrep
%undef i
%undef j

; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf

; convert FMA4 to FMA3 if possible
%macro FMA4_INSTR 4
    %macro %1 4-8 %1, %2, %3, %4
        %if cpuflag(fma4)
            v%5 %1, %2, %3, %4
        %elifidn %1, %2
            v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
        %elifidn %1, %3
            v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
        %elifidn %1, %4
            v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
        %else
            %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported
        %endif
    %endmacro
%endmacro

FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss

FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps

FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss

FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss

FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss

; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
%if ARCH_X86_64 == 0
%macro vpbroadcastq 2
%if sizeof%1 == 16
    movddup %1, %2
%else
    vbroadcastsd %1, %2
%endif
%endmacro
%endif
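
; Example (a sketch): the q#### constants defined above make shuffle immediates
; readable; q3120 selects source elements 3, 1, 2, 0 (from highest to lowest).
; The registers here are illustrative.
; pshufd m0, m1, q3120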