22c22
< /* $FreeBSD: head/contrib/gcc/config/i386/i386.c 237021 2012-06-13 20:21:08Z pfg $ */
---
> /* $FreeBSD: head/contrib/gcc/config/i386/i386.c 251212 2013-06-01 01:02:24Z pfg $ */
550a551,615
> struct processor_costs amdfam10_cost = {
> COSTS_N_INSNS (1), /* cost of an add instruction */
> COSTS_N_INSNS (2), /* cost of a lea instruction */
> COSTS_N_INSNS (1), /* variable shift costs */
> COSTS_N_INSNS (1), /* constant shift costs */
> {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
> COSTS_N_INSNS (4), /* HI */
> COSTS_N_INSNS (3), /* SI */
> COSTS_N_INSNS (4), /* DI */
> COSTS_N_INSNS (5)}, /* other */
> 0, /* cost of multiply per each bit set */
> {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
> COSTS_N_INSNS (35), /* HI */
> COSTS_N_INSNS (51), /* SI */
> COSTS_N_INSNS (83), /* DI */
> COSTS_N_INSNS (83)}, /* other */
> COSTS_N_INSNS (1), /* cost of movsx */
> COSTS_N_INSNS (1), /* cost of movzx */
> 8, /* "large" insn */
> 9, /* MOVE_RATIO */
> 4, /* cost for loading QImode using movzbl */
> {3, 4, 3}, /* cost of loading integer registers
> in QImode, HImode and SImode.
> Relative to reg-reg move (2). */
> {3, 4, 3}, /* cost of storing integer registers */
> 4, /* cost of reg,reg fld/fst */
> {4, 4, 12}, /* cost of loading fp registers
> in SFmode, DFmode and XFmode */
> {6, 6, 8}, /* cost of storing fp registers
> in SFmode, DFmode and XFmode */
> 2, /* cost of moving MMX register */
> {3, 3}, /* cost of loading MMX registers
> in SImode and DImode */
> {4, 4}, /* cost of storing MMX registers
> in SImode and DImode */
> 2, /* cost of moving SSE register */
> {4, 4, 3}, /* cost of loading SSE registers
> in SImode, DImode and TImode */
> {4, 4, 5}, /* cost of storing SSE registers
> in SImode, DImode and TImode */
> 3, /* MMX or SSE register to integer */
> /* On K8
> MOVD reg64, xmmreg Double FSTORE 4
> MOVD reg32, xmmreg Double FSTORE 4
> On AMDFAM10
> MOVD reg64, xmmreg Double FADD 3
> 1/1 1/1
> MOVD reg32, xmmreg Double FADD 3
> 1/1 1/1 */
> 64, /* size of prefetch block */
> /* New AMD processors never drop prefetches; if they cannot be performed
> immediately, they are queued. We set number of simultaneous prefetches
> to a large constant to reflect this (it probably is not a good idea not
> to limit number of prefetches at all, as their execution also takes some
> time). */
> 100, /* number of parallel prefetches */
> 5, /* Branch cost */
> COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
> COSTS_N_INSNS (4), /* cost of FMUL instruction. */
> COSTS_N_INSNS (19), /* cost of FDIV instruction. */
> COSTS_N_INSNS (2), /* cost of FABS instruction. */
> COSTS_N_INSNS (2), /* cost of FCHS instruction. */
> COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
> };
>
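Not part of the diff: a minimal sketch of how this table is consumed, under the usual i386 backend plumbing. COSTS_N_INSNS (n) is ((n) * 4) in GCC's rtl.h, and override_options points the global ix86_cost (declared in i386.h) at the entry selected from processor_target_table, so with -mtune=amdfam10 it refers to amdfam10_cost above. The helper below is hypothetical; ix86_cost and its add/lea fields come from the stock backend.

    extern const struct processor_costs *ix86_cost;

    /* Illustration only: amdfam10_cost rates LEA at COSTS_N_INSNS (2) == 8
       against COSTS_N_INSNS (1) == 4 for ADD, so this returns false when
       tuning for AMDFAM10.  */
    static int
    lea_no_worse_than_add_p (void)
    {
      return ix86_cost->lea <= ix86_cost->add;
    }
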
836a902
> #define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
841a908
> #define m_ATHLON_K8_AMDFAM10 (m_K8 | m_ATHLON | m_AMDFAM10)
849,850c916,919
< const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
< const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
---
> const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_CORE2
> | m_GENERIC64;
> const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
> | m_NOCONA | m_CORE2 | m_GENERIC;
852c921,923
< const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
---
> /* Enable to zero extend integer registers to avoid partial dependencies */
> const int x86_movx = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA
> | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
855,858c926,932
< const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
< const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
< const int x86_3dnow_a = m_ATHLON_K8;
< const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
---
> const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8_AMDFAM10
> | m_K6 | m_CORE2 | m_GENERIC;
> const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
> | m_NOCONA;
> const int x86_3dnow_a = m_ATHLON_K8_AMDFAM10;
> const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8_AMDFAM10
> | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
864c938,939
< const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
---
> const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32;
> /*m_GENERIC | m_ATHLON_K8 ? */
876c951,952
< const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
---
> const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT
> | m_CORE2 | m_GENERIC);
882c958,960
< const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
---
> const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486
> | m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC;
> /* m_PENT4 ? */
892,899c970,994
< const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
< const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
---
> /* Enable if add/sub rsp is preferred over 1 or 2 push/pop */
> const int x86_sub_esp_4 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA
> | m_CORE2 | m_GENERIC;
> const int x86_sub_esp_8 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_386 | m_486
> | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
> const int x86_add_esp_4 = m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT4 | m_NOCONA
> | m_CORE2 | m_GENERIC;
> const int x86_add_esp_8 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_K6_GEODE | m_386
> | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
> /* Enable if integer moves are preferred for DFmode copies */
> const int x86_integer_DFmode_moves = ~(m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
> | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
> const int x86_partial_reg_dependency = m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
> | m_CORE2 | m_GENERIC;
> const int x86_memory_mismatch_stall = m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
> | m_CORE2 | m_GENERIC;
> /* If ACCUMULATE_OUTGOING_ARGS is enabled, the maximum amount of space required
> for outgoing arguments will be computed and placed into the variable
> `current_function_outgoing_args_size'. No space will be pushed onto the stack
> for each call; instead, the function prologue should increase the stack frame
> size by this amount. Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is
> not proper. */
> const int x86_accumulate_outgoing_args = m_ATHLON_K8_AMDFAM10 | m_PENT4
> | m_NOCONA | m_PPRO | m_CORE2
> | m_GENERIC;
903c998,1000
< const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
---
> const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO
> | m_ATHLON_K8_AMDFAM10 | m_PENT4
> | m_NOCONA | m_CORE2 | m_GENERIC;
913c1010,1011
< const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
---
> const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
> | m_GENERIC | m_AMDFAM10;
919c1017,1067
< const int x86_sse_typeless_stores = m_ATHLON_K8;
---
> /* Code generation for scalar reg-reg moves of single and double precision data:
> if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
> movaps reg, reg
> else
> movss reg, reg
> if (x86_sse_partial_reg_dependency == true)
> movapd reg, reg
> else
> movsd reg, reg
>
> Code generation for scalar loads of double precision data:
> if (x86_sse_split_regs == true)
> movlpd mem, reg (gas syntax)
> else
> movsd mem, reg
>
> Code generation for unaligned packed loads of single precision data
> (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
> if (x86_sse_unaligned_move_optimal)
> movups mem, reg
>
> if (x86_sse_partial_reg_dependency == true)
> {
> xorps reg, reg
> movlps mem, reg
> movhps mem+8, reg
> }
> else
> {
> movlps mem, reg
> movhps mem+8, reg
> }
>
> Code generation for unaligned packed loads of double precision data
> (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
> if (x86_sse_unaligned_move_optimal)
> movupd mem, reg
>
> if (x86_sse_split_regs == true)
> {
> movlpd mem, reg
> movhpd mem+8, reg
> }
> else
> {
> movsd mem, reg
> movhpd mem+8, reg
> }
> */
> const int x86_sse_unaligned_move_optimal = m_AMDFAM10;
> const int x86_sse_typeless_stores = m_ATHLON_K8_AMDFAM10;
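The block comment above only describes the selection logic; the i386.h hunk that turns the new tuning word into a TARGET_ test is not part of this file. Assuming it mirrors the existing flags (x86_sse_partial_reg_dependency and friends, which are masked with TUNEMASK, i.e. 1 << ix86_tune), the companion definition would look roughly like:

    /* Hypothetical reconstruction; the real change lives in i386.h and is
       not shown in this diff.  */
    #define TARGET_SSE_UNALIGNED_MOVE_OPTIMAL \
      (x86_sse_unaligned_move_optimal & TUNEMASK)

With that in place, the movupd/movups fast paths added to ix86_expand_vector_move_misaligned further down are taken only when tuning for AMDFAM10.
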
921c1069
< const int x86_use_ffreep = m_ATHLON_K8;
---
> const int x86_use_ffreep = m_ATHLON_K8_AMDFAM10;
929c1077,1078
< const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON | m_PENT4 | m_NOCONA | m_CORE2 | m_PPRO | m_GENERIC32;
---
> const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4
> | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
932,934c1081,1085
< const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
< const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
< const int x86_use_bt = m_ATHLON_K8;
---
> const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT4
> | m_NOCONA | m_CORE2 | m_GENERIC;
> const int x86_schedule = m_PPRO | m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT
> | m_CORE2 | m_GENERIC;
> const int x86_use_bt = m_ATHLON_K8_AMDFAM10;
939,940d1089
< /* Compare and exchange 16 bytes was added for nocona. */
< const int x86_cmpxchg16b = m_NOCONA | m_CORE2;
943c1092
< const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
---
> const int x86_pad_returns = m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC;
1159a1309,1311
> /* true if cmpxchg16b is supported. */
> int x86_cmpxchg16b;
>
1514,1515c1666,1667
< target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3);
< target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3;
---
> target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A);
> target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A;
1522,1523c1674,1675
< target_flags &= ~(MASK_SSE3 | MASK_SSSE3);
< target_flags_explicit |= MASK_SSE3 | MASK_SSSE3;
---
> target_flags &= ~(MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A);
> target_flags_explicit |= MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A;
1530,1531c1682,1683
< target_flags &= ~MASK_SSSE3;
< target_flags_explicit |= MASK_SSSE3;
---
> target_flags &= ~(MASK_SSSE3 | MASK_SSE4A);
> target_flags_explicit |= MASK_SSSE3 | MASK_SSE4A;
1583c1735,1736
< {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
---
> {&generic64_cost, 0, 0, 16, 7, 16, 7, 16},
> {&amdfam10_cost, 0, 0, 32, 24, 32, 7, 32}
1601c1754,1758
< PTA_SSSE3 = 256
---
> PTA_SSSE3 = 256,
> PTA_CX16 = 512,
> PTA_POPCNT = 1024,
> PTA_ABM = 2048,
> PTA_SSE4A = 4096
1628c1785
< | PTA_MMX | PTA_PREFETCH_SSE},
---
> | PTA_MMX | PTA_PREFETCH_SSE | PTA_CX16},
1631c1788
< | PTA_PREFETCH_SSE},
---
> | PTA_PREFETCH_SSE | PTA_CX16},
1665a1823,1830
> {"amdfam10", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
> | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
> | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
> | PTA_ABM | PTA_SSE4A | PTA_CX16},
> {"barcelona", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
> | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
> | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
> | PTA_ABM | PTA_SSE4A | PTA_CX16},
1827a1993,2003
> if (processor_alias_table[i].flags & PTA_CX16)
> x86_cmpxchg16b = true;
> if (processor_alias_table[i].flags & PTA_POPCNT
> && !(target_flags_explicit & MASK_POPCNT))
> target_flags |= MASK_POPCNT;
> if (processor_alias_table[i].flags & PTA_ABM
> && !(target_flags_explicit & MASK_ABM))
> target_flags |= MASK_ABM;
> if (processor_alias_table[i].flags & PTA_SSE4A
> && !(target_flags_explicit & MASK_SSE4A))
> target_flags |= MASK_SSE4A;
2005a2182,2185
> /* Turn on SSE3 builtins for -msse4a. */
> if (TARGET_SSE4A)
> target_flags |= MASK_SSE3;
>
2024a2205,2208
> /* Turn on POPCNT builtins for -mabm. */
> if (TARGET_ABM)
> target_flags |= MASK_POPCNT;
>
9250,9251c9434,9435
< {
< rtx zero;
---
> {
> rtx zero;
9252a9437,9444
> if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
> {
> op0 = gen_lowpart (V2DFmode, op0);
> op1 = gen_lowpart (V2DFmode, op1);
> emit_insn (gen_sse2_movupd (op0, op1));
> return;
> }
>
9279c9471,9479
< {
---
> {
> if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
> {
> op0 = gen_lowpart (V4SFmode, op0);
> op1 = gen_lowpart (V4SFmode, op1);
> emit_insn (gen_sse_movups (op0, op1));
> return;
> }
>
13835a14036
> case PROCESSOR_AMDFAM10:
14033a14235
> case PROCESSOR_AMDFAM10:
14746a14949,14956
> /* AMDFAM10 - SSE4A New Instructions. */
> IX86_BUILTIN_MOVNTSD,
> IX86_BUILTIN_MOVNTSS,
> IX86_BUILTIN_EXTRQI,
> IX86_BUILTIN_EXTRQ,
> IX86_BUILTIN_INSERTQI,
> IX86_BUILTIN_INSERTQ,
>
15470a15681,15692
> tree v2di_ftype_v2di_unsigned_unsigned
> = build_function_type_list (V2DI_type_node, V2DI_type_node,
> unsigned_type_node, unsigned_type_node,
> NULL_TREE);
> tree v2di_ftype_v2di_v2di_unsigned_unsigned
> = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
> unsigned_type_node, unsigned_type_node,
> NULL_TREE);
> tree v2di_ftype_v2di_v16qi
> = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
> NULL_TREE);
>
15806a16029,16042
> /* AMDFAM10 SSE4A New built-ins */
> def_builtin (MASK_SSE4A, "__builtin_ia32_movntsd",
> void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTSD);
> def_builtin (MASK_SSE4A, "__builtin_ia32_movntss",
> void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTSS);
> def_builtin (MASK_SSE4A, "__builtin_ia32_extrqi",
> v2di_ftype_v2di_unsigned_unsigned, IX86_BUILTIN_EXTRQI);
> def_builtin (MASK_SSE4A, "__builtin_ia32_extrq",
> v2di_ftype_v2di_v16qi, IX86_BUILTIN_EXTRQ);
> def_builtin (MASK_SSE4A, "__builtin_ia32_insertqi",
> v2di_ftype_v2di_v2di_unsigned_unsigned, IX86_BUILTIN_INSERTQI);
> def_builtin (MASK_SSE4A, "__builtin_ia32_insertq",
> v2di_ftype_v2di_v2di, IX86_BUILTIN_INSERTQ);
>
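A usage sketch, not part of the patch: the prototypes registered above map onto plain C as follows (vector types spelled out by hand so the example does not depend on an intrinsics header). Compile with -msse4a or -march=amdfam10.

    typedef double    __v2df __attribute__ ((__vector_size__ (16)));
    typedef long long __v2di __attribute__ ((__vector_size__ (16)));

    /* MOVNTSD: non-temporal store of the low double of V to *DST
       (void_ftype_pdouble_v2df above).  */
    void
    stream_low_double (double *dst, __v2df v)
    {
      __builtin_ia32_movntsd (dst, v);
    }

    /* EXTRQ, immediate form (v2di_ftype_v2di_unsigned_unsigned): both
       unsigned operands must be literal constants, as enforced by the
       expander added later in this diff.  */
    __v2di
    extract_field (__v2di v)
    {
      return __builtin_ia32_extrqi (v, 8, 16);
    }
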
16303,16305c16539,16541
< tree arg0, arg1, arg2;
< rtx op0, op1, op2, pat;
< enum machine_mode tmode, mode0, mode1, mode2, mode3;
---
> tree arg0, arg1, arg2, arg3;
> rtx op0, op1, op2, op3, pat;
> enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
16820a17057,17164
> case IX86_BUILTIN_MOVNTSD:
> return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, arglist);
>
> case IX86_BUILTIN_MOVNTSS:
> return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, arglist);
>
> case IX86_BUILTIN_INSERTQ:
> case IX86_BUILTIN_EXTRQ:
> icode = (fcode == IX86_BUILTIN_EXTRQ
> ? CODE_FOR_sse4a_extrq
> : CODE_FOR_sse4a_insertq);
> arg0 = TREE_VALUE (arglist);
> arg1 = TREE_VALUE (TREE_CHAIN (arglist));
> op0 = expand_normal (arg0);
> op1 = expand_normal (arg1);
> tmode = insn_data[icode].operand[0].mode;
> mode1 = insn_data[icode].operand[1].mode;
> mode2 = insn_data[icode].operand[2].mode;
> if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
> op0 = copy_to_mode_reg (mode1, op0);
> if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
> op1 = copy_to_mode_reg (mode2, op1);
> if (optimize || target == 0
> || GET_MODE (target) != tmode
> || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
> target = gen_reg_rtx (tmode);
> pat = GEN_FCN (icode) (target, op0, op1);
> if (! pat)
> return NULL_RTX;
> emit_insn (pat);
> return target;
>
> case IX86_BUILTIN_EXTRQI:
> icode = CODE_FOR_sse4a_extrqi;
> arg0 = TREE_VALUE (arglist);
> arg1 = TREE_VALUE (TREE_CHAIN (arglist));
> arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
> op0 = expand_normal (arg0);
> op1 = expand_normal (arg1);
> op2 = expand_normal (arg2);
> tmode = insn_data[icode].operand[0].mode;
> mode1 = insn_data[icode].operand[1].mode;
> mode2 = insn_data[icode].operand[2].mode;
> mode3 = insn_data[icode].operand[3].mode;
> if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
> op0 = copy_to_mode_reg (mode1, op0);
> if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
> {
> error ("index mask must be an immediate");
> return gen_reg_rtx (tmode);
> }
> if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
> {
> error ("length mask must be an immediate");
> return gen_reg_rtx (tmode);
> }
> if (optimize || target == 0
> || GET_MODE (target) != tmode
> || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
> target = gen_reg_rtx (tmode);
> pat = GEN_FCN (icode) (target, op0, op1, op2);
> if (! pat)
> return NULL_RTX;
> emit_insn (pat);
> return target;
>
> case IX86_BUILTIN_INSERTQI:
> icode = CODE_FOR_sse4a_insertqi;
> arg0 = TREE_VALUE (arglist);
> arg1 = TREE_VALUE (TREE_CHAIN (arglist));
> arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
> arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
> op0 = expand_normal (arg0);
> op1 = expand_normal (arg1);
> op2 = expand_normal (arg2);
> op3 = expand_normal (arg3);
> tmode = insn_data[icode].operand[0].mode;
> mode1 = insn_data[icode].operand[1].mode;
> mode2 = insn_data[icode].operand[2].mode;
> mode3 = insn_data[icode].operand[3].mode;
> mode4 = insn_data[icode].operand[4].mode;
>
> if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
> op0 = copy_to_mode_reg (mode1, op0);
>
> if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
> op1 = copy_to_mode_reg (mode2, op1);
>
> if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
> {
> error ("index mask must be an immediate");
> return gen_reg_rtx (tmode);
> }
> if (! (*insn_data[icode].operand[4].predicate) (op3, mode4))
> {
> error ("length mask must be an immediate");
> return gen_reg_rtx (tmode);
> }
> if (optimize || target == 0
> || GET_MODE (target) != tmode
> || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
> target = gen_reg_rtx (tmode);
> pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
> if (! pat)
> return NULL_RTX;
> emit_insn (pat);
> return target;
>
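The immediate-operand checks above are the user-visible part of the expander: the extrqi/insertqi forms reject non-constant masks at expand time instead of forcing them into registers. A negative sketch, reusing the __v2di typedef from the earlier example:

    /* With -msse4a this is rejected with the "index mask must be an
       immediate" / "length mask must be an immediate" diagnostics added
       above, because idx and len are not compile-time constants.  */
    __v2di
    bad_extract (__v2di v, unsigned idx, unsigned len)
    {
      return __builtin_ia32_extrqi (v, idx, len);
    }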