i386.c (revision 237021) -> i386.c (revision 251212)
1/* Subroutines used for code generation on IA-32.
2 Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by

--- 5 unchanged lines hidden ---

14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING. If not, write to
19the Free Software Foundation, 51 Franklin Street, Fifth Floor,
20Boston, MA 02110-1301, USA. */
21
22/* $FreeBSD: head/contrib/gcc/config/i386/i386.c 237021 2012-06-13 20:21:08Z pfg $ */
22/* $FreeBSD: head/contrib/gcc/config/i386/i386.c 251212 2013-06-01 01:02:24Z pfg $ */
23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "rtl.h"
29#include "tree.h"
30#include "tm_p.h"

--- 512 unchanged lines hidden ---

543 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
544 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
545 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
546 COSTS_N_INSNS (2), /* cost of FABS instruction. */
547 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
548 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
549};
550
551struct processor_costs amdfam10_cost = {
552 COSTS_N_INSNS (1), /* cost of an add instruction */
553 COSTS_N_INSNS (2), /* cost of a lea instruction */
554 COSTS_N_INSNS (1), /* variable shift costs */
555 COSTS_N_INSNS (1), /* constant shift costs */
556 {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
557 COSTS_N_INSNS (4), /* HI */
558 COSTS_N_INSNS (3), /* SI */
559 COSTS_N_INSNS (4), /* DI */
560 COSTS_N_INSNS (5)}, /* other */
561 0, /* cost of multiply per each bit set */
562 {COSTS_N_INSNS (19), /* cost of a divide/mod for QI */
563 COSTS_N_INSNS (35), /* HI */
564 COSTS_N_INSNS (51), /* SI */
565 COSTS_N_INSNS (83), /* DI */
566 COSTS_N_INSNS (83)}, /* other */
567 COSTS_N_INSNS (1), /* cost of movsx */
568 COSTS_N_INSNS (1), /* cost of movzx */
569 8, /* "large" insn */
570 9, /* MOVE_RATIO */
571 4, /* cost for loading QImode using movzbl */
572 {3, 4, 3}, /* cost of loading integer registers
573 in QImode, HImode and SImode.
574 Relative to reg-reg move (2). */
575 {3, 4, 3}, /* cost of storing integer registers */
576 4, /* cost of reg,reg fld/fst */
577 {4, 4, 12}, /* cost of loading fp registers
578 in SFmode, DFmode and XFmode */
579 {6, 6, 8}, /* cost of storing fp registers
580 in SFmode, DFmode and XFmode */
581 2, /* cost of moving MMX register */
582 {3, 3}, /* cost of loading MMX registers
583 in SImode and DImode */
584 {4, 4}, /* cost of storing MMX registers
585 in SImode and DImode */
586 2, /* cost of moving SSE register */
587 {4, 4, 3}, /* cost of loading SSE registers
588 in SImode, DImode and TImode */
589 {4, 4, 5}, /* cost of storing SSE registers
590 in SImode, DImode and TImode */
591 3, /* MMX or SSE register to integer */
592 /* On K8
593 MOVD reg64, xmmreg Double FSTORE 4
594 MOVD reg32, xmmreg Double FSTORE 4
595 On AMDFAM10
596 MOVD reg64, xmmreg Double FADD 3
597 1/1 1/1
598 MOVD reg32, xmmreg Double FADD 3
599 1/1 1/1 */
600 64, /* size of prefetch block */
 601 /* New AMD processors never drop prefetches; if they cannot be performed
 602 immediately, they are queued. We set the number of simultaneous prefetches
 603 to a large constant to reflect this (it is probably not a good idea to
 604 leave the number of prefetches entirely unlimited, as their execution
 605 also takes some time). */
606 100, /* number of parallel prefetches */
607 5, /* Branch cost */
608 COSTS_N_INSNS (4), /* cost of FADD and FSUB insns. */
609 COSTS_N_INSNS (4), /* cost of FMUL instruction. */
610 COSTS_N_INSNS (19), /* cost of FDIV instruction. */
611 COSTS_N_INSNS (2), /* cost of FABS instruction. */
612 COSTS_N_INSNS (2), /* cost of FCHS instruction. */
613 COSTS_N_INSNS (35), /* cost of FSQRT instruction. */
614};
615
551static const
552struct processor_costs pentium4_cost = {
553 COSTS_N_INSNS (1), /* cost of an add instruction */
554 COSTS_N_INSNS (3), /* cost of a lea instruction */
555 COSTS_N_INSNS (4), /* variable shift costs */
556 COSTS_N_INSNS (4), /* constant shift costs */
557 {COSTS_N_INSNS (15), /* cost of starting multiply for QI */
558 COSTS_N_INSNS (15), /* HI */

--- 270 unchanged lines hidden ---

829#define m_PPRO (1<<PROCESSOR_PENTIUMPRO)
830#define m_GEODE (1<<PROCESSOR_GEODE)
831#define m_K6_GEODE (m_K6 | m_GEODE)
832#define m_K6 (1<<PROCESSOR_K6)
833#define m_ATHLON (1<<PROCESSOR_ATHLON)
834#define m_PENT4 (1<<PROCESSOR_PENTIUM4)
835#define m_K8 (1<<PROCESSOR_K8)
836#define m_ATHLON_K8 (m_K8 | m_ATHLON)
902#define m_AMDFAM10 (1<<PROCESSOR_AMDFAM10)
837#define m_NOCONA (1<<PROCESSOR_NOCONA)
838#define m_CORE2 (1<<PROCESSOR_CORE2)
839#define m_GENERIC32 (1<<PROCESSOR_GENERIC32)
840#define m_GENERIC64 (1<<PROCESSOR_GENERIC64)
841#define m_GENERIC (m_GENERIC32 | m_GENERIC64)
908#define m_ATHLON_K8_AMDFAM10 (m_K8 | m_ATHLON | m_AMDFAM10)
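Illustration (not part of the commit): each m_* mask above is a bitmask over
enum processor_type, tested against the bit for the CPU currently being tuned
for. A minimal sketch of the consuming convention, assuming the TUNEMASK /
TARGET_* wrapper spellings used by i386.h in this GCC version:

    #define TUNEMASK (1 << ix86_tune)
    #define TARGET_USE_LEAVE (x86_use_leave & TUNEMASK)
    /* e.g. a flag enabled for Athlon, K8 and Family 10h alike:
       const int x86_example_flag = m_ATHLON_K8_AMDFAM10;  */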
842
843/* Generic instruction choice should be a common subset of the supported CPUs
844 (PPro/PENT4/NOCONA/CORE2/Athlon/K8). */
845
846/* Leave does not affect Nocona SPEC2000 results negatively, so enabling it for
847 Generic64 seems like a good code-size tradeoff. We can't enable it for 32-bit
848 generic because it does not work well with PPro-based chips. */
849const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_CORE2 | m_GENERIC64;
850const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
916const int x86_use_leave = m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_CORE2
917 | m_GENERIC64;
918const int x86_push_memory = m_386 | m_K6_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
919 | m_NOCONA | m_CORE2 | m_GENERIC;
851const int x86_zero_extend_with_and = m_486 | m_PENT;
852const int x86_movx = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
921/* Enable to zero extend integer registers to avoid partial dependencies */
922const int x86_movx = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA
923 | m_CORE2 | m_GENERIC | m_GEODE /* m_386 | m_K6 */;
853const int x86_double_with_add = ~m_386;
854const int x86_use_bit_test = m_386;
855const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8 | m_K6 | m_CORE2 | m_GENERIC;
856const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA;
857const int x86_3dnow_a = m_ATHLON_K8;
858const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
926const int x86_unroll_strlen = m_486 | m_PENT | m_PPRO | m_ATHLON_K8_AMDFAM10
927 | m_K6 | m_CORE2 | m_GENERIC;
928const int x86_cmove = m_PPRO | m_GEODE | m_ATHLON_K8_AMDFAM10 | m_PENT4
929 | m_NOCONA;
930const int x86_3dnow_a = m_ATHLON_K8_AMDFAM10;
931const int x86_deep_branch = m_PPRO | m_K6_GEODE | m_ATHLON_K8_AMDFAM10
932 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
859/* Branch hints were put in P4 based on simulation results. But
860 after P4 was made, no performance benefit was observed with
861 branch hints. They also increase the code size. As a result,
862 icc never generates branch hints. */
863const int x86_branch_hints = 0;
864const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32; /*m_GENERIC | m_ATHLON_K8 ? */
938const int x86_use_sahf = m_PPRO | m_K6_GEODE | m_PENT4 | m_NOCONA | m_GENERIC32;
939 /*m_GENERIC | m_ATHLON_K8 ? */
865/* We probably ought to watch for partial register stalls on the Generic32
866 compilation setting as well. However, in the current implementation the
867 partial register stalls are not eliminated very well - they can
868 be introduced via subregs synthesized by combine and can happen
869 in caller/callee saving sequences.
870 Because this option pays back little on PPro-based chips and conflicts
871 with the partial register dependencies used by Athlon/P4-based chips, it is
872 better to leave it off for generic32 for now. */
873const int x86_partial_reg_stall = m_PPRO;
874const int x86_partial_flag_reg_stall = m_CORE2 | m_GENERIC;
875const int x86_use_himode_fiop = m_386 | m_486 | m_K6_GEODE;
876const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8 | m_PENT | m_CORE2 | m_GENERIC);
951const int x86_use_simode_fiop = ~(m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT
952 | m_CORE2 | m_GENERIC);
877const int x86_use_mov0 = m_K6;
878const int x86_use_cltd = ~(m_PENT | m_K6 | m_CORE2 | m_GENERIC);
879const int x86_read_modify_write = ~m_PENT;
880const int x86_read_modify = ~(m_PENT | m_PPRO);
881const int x86_split_long_moves = m_PPRO;
882const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486 | m_ATHLON_K8 | m_CORE2 | m_GENERIC; /* m_PENT4 ? */
958const int x86_promote_QImode = m_K6_GEODE | m_PENT | m_386 | m_486
959 | m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC;
960 /* m_PENT4 ? */
883const int x86_fast_prefix = ~(m_PENT | m_486 | m_386);
884const int x86_single_stringop = m_386 | m_PENT4 | m_NOCONA;
885const int x86_qimode_math = ~(0);
886const int x86_promote_qi_regs = 0;
887/* On PPro this flag is meant to avoid partial register stalls. Just like
888 the x86_partial_reg_stall this option might be considered for Generic32
889 if our scheme for avoiding partial stalls was more effective. */
890const int x86_himode_math = ~(m_PPRO);
891const int x86_promote_hi_regs = m_PPRO;
892const int x86_sub_esp_4 = m_ATHLON_K8 | m_PPRO | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
893const int x86_sub_esp_8 = m_ATHLON_K8 | m_PPRO | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
894const int x86_add_esp_4 = m_ATHLON_K8 | m_K6_GEODE | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
895const int x86_add_esp_8 = m_ATHLON_K8 | m_PPRO | m_K6_GEODE | m_386 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
896const int x86_integer_DFmode_moves = ~(m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
897const int x86_partial_reg_dependency = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
898const int x86_memory_mismatch_stall = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
899const int x86_accumulate_outgoing_args = m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
970/* Enable if add/sub rsp is preferred over 1 or 2 push/pop */
971const int x86_sub_esp_4 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_PENT4 | m_NOCONA
972 | m_CORE2 | m_GENERIC;
973const int x86_sub_esp_8 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_386 | m_486
974 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
975const int x86_add_esp_4 = m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT4 | m_NOCONA
976 | m_CORE2 | m_GENERIC;
977const int x86_add_esp_8 = m_ATHLON_K8_AMDFAM10 | m_PPRO | m_K6_GEODE | m_386
978 | m_486 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
979/* Enable if integer moves are preferred for DFmode copies */
980const int x86_integer_DFmode_moves = ~(m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
981 | m_PPRO | m_CORE2 | m_GENERIC | m_GEODE);
982const int x86_partial_reg_dependency = m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
983 | m_CORE2 | m_GENERIC;
984const int x86_memory_mismatch_stall = m_ATHLON_K8_AMDFAM10 | m_PENT4 | m_NOCONA
985 | m_CORE2 | m_GENERIC;
986/* If ACCUMULATE_OUTGOING_ARGS is enabled, the maximum amount of space required
987 for outgoing arguments will be computed and placed into the variable
988 `current_function_outgoing_args_size'. No space will be pushed onto the stack
989 for each call; instead, the function prologue should increase the stack frame
990 size by this amount. Setting both PUSH_ARGS and ACCUMULATE_OUTGOING_ARGS is
991 not proper. */
992const int x86_accumulate_outgoing_args = m_ATHLON_K8_AMDFAM10 | m_PENT4
993 | m_NOCONA | m_PPRO | m_CORE2
994 | m_GENERIC;
900const int x86_prologue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
901const int x86_epilogue_using_move = m_ATHLON_K8 | m_PPRO | m_CORE2 | m_GENERIC;
902const int x86_shift1 = ~m_486;
903const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
998const int x86_arch_always_fancy_math_387 = m_PENT | m_PPRO
999 | m_ATHLON_K8_AMDFAM10 | m_PENT4
1000 | m_NOCONA | m_CORE2 | m_GENERIC;
904/* In the Generic model we have a conflict here between PPro/Pentium4-based
905 chips that treat 128-bit SSE registers as single units and K8-based chips
906 that split SSE registers into two 64-bit halves.
907 x86_sse_partial_reg_dependency promotes all store destinations to 128 bits
908 to allow register renaming on 128-bit SSE units, but this usually costs one
909 extra micro-op on 64-bit SSE units. Experimental results show that disabling
910 this option on P4 brings over a 20% SPECfp regression, while enabling it on
911 K8 brings roughly a 2.4% regression that can be partly masked by careful
912 scheduling of moves. */
913const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
1010const int x86_sse_partial_reg_dependency = m_PENT4 | m_NOCONA | m_PPRO | m_CORE2
1011 | m_GENERIC | m_AMDFAM10;
914/* Set for machines where the type and dependencies are resolved on SSE
915 register parts instead of whole registers, so we may maintain just the
916 lower part of scalar values in the proper format, leaving the upper part
917 undefined. */
918const int x86_sse_split_regs = m_ATHLON_K8;
919const int x86_sse_typeless_stores = m_ATHLON_K8;
1017/* Code generation for scalar reg-reg moves of single and double precision data:
1018 if (x86_sse_partial_reg_dependency == true | x86_sse_split_regs == true)
1019 movaps reg, reg
1020 else
1021 movss reg, reg
1022 if (x86_sse_partial_reg_dependency == true)
1023 movapd reg, reg
1024 else
1025 movsd reg, reg
1026
1027 Code generation for scalar loads of double precision data:
1028 if (x86_sse_split_regs == true)
1029 movlpd mem, reg (gas syntax)
1030 else
1031 movsd mem, reg
1032
1033 Code generation for unaligned packed loads of single precision data
1034 (x86_sse_unaligned_move_optimal overrides x86_sse_partial_reg_dependency):
1035 if (x86_sse_unaligned_move_optimal)
1036 movups mem, reg
1037
1038 if (x86_sse_partial_reg_dependency == true)
1039 {
1040 xorps reg, reg
1041 movlps mem, reg
1042 movhps mem+8, reg
1043 }
1044 else
1045 {
1046 movlps mem, reg
1047 movhps mem+8, reg
1048 }
1049
1050 Code generation for unaligned packed loads of double precision data
1051 (x86_sse_unaligned_move_optimal overrides x86_sse_split_regs):
1052 if (x86_sse_unaligned_move_optimal)
1053 movupd mem, reg
1054
1055 if (x86_sse_split_regs == true)
1056 {
1057 movlpd mem, reg
1058 movhpd mem+8, reg
1059 }
1060 else
1061 {
1062 movsd mem, reg
1063 movhpd mem+8, reg
1064 }
1065 */
1066const int x86_sse_unaligned_move_optimal = m_AMDFAM10;
1067const int x86_sse_typeless_stores = m_ATHLON_K8_AMDFAM10;
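Illustration (not part of the commit): the unaligned single-precision load
strategies described in the comment above, expressed with the standard SSE
intrinsics from xmmintrin.h. Which sequence the compiler actually emits is
driven by these tuning flags; the function names here are invented for the
sketch.

    #include <xmmintrin.h>

    /* Single-instruction movups, preferred where
       x86_sse_unaligned_move_optimal is set (AMDFAM10).  */
    static __m128 load_unaligned_movups (const float *p)
    {
      return _mm_loadu_ps (p);
    }

    /* xorps + movlps/movhps split, as used on the
       x86_sse_partial_reg_dependency path; the explicit zero breaks the
       dependency on the register's previous contents.  */
    static __m128 load_unaligned_split (const float *p)
    {
      __m128 x = _mm_setzero_ps ();                  /* xorps  reg, reg    */
      x = _mm_loadl_pi (x, (const __m64 *) p);       /* movlps mem, reg    */
      x = _mm_loadh_pi (x, (const __m64 *) (p + 2)); /* movhps mem+8, reg  */
      return x;
    }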
920const int x86_sse_load0_by_pxor = m_PPRO | m_PENT4 | m_NOCONA;
921const int x86_use_ffreep = m_ATHLON_K8;
1069const int x86_use_ffreep = m_ATHLON_K8_AMDFAM10;
922const int x86_rep_movl_optimal = m_386 | m_PENT | m_PPRO | m_K6_GEODE | m_CORE2;
923const int x86_use_incdec = ~(m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC);
924
925/* ??? Allowing interunit moves makes it all too easy for the compiler to put
926 integer data in xmm registers. Which results in pretty abysmal code. */
927const int x86_inter_unit_moves = 0 /* ~(m_ATHLON_K8) */;
928
929const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON | m_PENT4 | m_NOCONA | m_CORE2 | m_PPRO | m_GENERIC32;
1077const int x86_ext_80387_constants = m_K6_GEODE | m_ATHLON_K8 | m_PENT4
1078 | m_NOCONA | m_PPRO | m_CORE2 | m_GENERIC;
930/* Some CPU cores are not able to predict more than 4 branch instructions in
931 the 16 byte window. */
932const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8 | m_PENT4 | m_NOCONA | m_CORE2 | m_GENERIC;
933const int x86_schedule = m_PPRO | m_ATHLON_K8 | m_K6_GEODE | m_PENT | m_CORE2 | m_GENERIC;
934const int x86_use_bt = m_ATHLON_K8;
1081const int x86_four_jump_limit = m_PPRO | m_ATHLON_K8_AMDFAM10 | m_PENT4
1082 | m_NOCONA | m_CORE2 | m_GENERIC;
1083const int x86_schedule = m_PPRO | m_ATHLON_K8_AMDFAM10 | m_K6_GEODE | m_PENT
1084 | m_CORE2 | m_GENERIC;
1085const int x86_use_bt = m_ATHLON_K8_AMDFAM10;
935/* Compare and exchange was added for 80486. */
936const int x86_cmpxchg = ~m_386;
937/* Compare and exchange 8 bytes was added for pentium. */
938const int x86_cmpxchg8b = ~(m_386 | m_486);
939/* Compare and exchange 16 bytes was added for nocona. */
940const int x86_cmpxchg16b = m_NOCONA | m_CORE2;
941/* Exchange and add was added for 80486. */
942const int x86_xadd = ~m_386;
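Illustration (not part of the commit): at the source level these capability
masks gate GCC's __sync builtins, which are assumed here to expand to lock
cmpxchg / lock xadd when the selected CPU supports them; the function names
are invented for the sketch.

    static int fetch_add (int *p, int v)
    {
      return __sync_fetch_and_add (p, v);                  /* lock xadd    */
    }

    static int compare_swap (int *p, int expected, int desired)
    {
      return __sync_val_compare_and_swap (p, expected, desired); /* lock cmpxchg */
    }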
943const int x86_pad_returns = m_ATHLON_K8 | m_CORE2 | m_GENERIC;
1092const int x86_pad_returns = m_ATHLON_K8_AMDFAM10 | m_CORE2 | m_GENERIC;
944
945/* In case the average insn count for single function invocation is
946 lower than this constant, emit fast (but longer) prologue and
947 epilogue code. */
948#define FAST_PROLOGUE_INSN_COUNT 20
949
950/* Names for 8 (low), 8 (high), and 16-bit registers, respectively. */
951static const char *const qi_reg_name[] = QI_REGISTER_NAMES;

--- 200 unchanged lines hidden ---

1152/* Which cpu are we scheduling for. */
1153enum processor_type ix86_tune;
1154/* Which instruction set architecture to use. */
1155enum processor_type ix86_arch;
1156
1157/* true if sse prefetch instruction is not NOOP. */
1158int x86_prefetch_sse;
1159
1309/* true if cmpxchg16b is supported. */
1310int x86_cmpxchg16b;
1311
1160/* ix86_regparm_string as a number */
1161static int ix86_regparm;
1162
1163/* -mstackrealign option */
1164extern int ix86_force_align_arg_pointer;
1165static const char ix86_force_align_arg_pointer_string[] = "force_align_arg_pointer";
1166
1167/* Preferred alignment for stack boundary in bits. */

--- 338 unchanged lines hidden ---

1506 target_flags &= ~(MASK_3DNOW | MASK_3DNOW_A);
1507 target_flags_explicit |= MASK_3DNOW | MASK_3DNOW_A;
1508 }
1509 return true;
1510
1511 case OPT_msse:
1512 if (!value)
1513 {
1514 target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3);
1515 target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3;
1666 target_flags &= ~(MASK_SSE2 | MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A);
1667 target_flags_explicit |= MASK_SSE2 | MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A;
1516 }
1517 return true;
1518
1519 case OPT_msse2:
1520 if (!value)
1521 {
1522 target_flags &= ~(MASK_SSE3 | MASK_SSSE3);
1523 target_flags_explicit |= MASK_SSE3 | MASK_SSSE3;
1674 target_flags &= ~(MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A);
1675 target_flags_explicit |= MASK_SSE3 | MASK_SSSE3 | MASK_SSE4A;
1524 }
1525 return true;
1526
1527 case OPT_msse3:
1528 if (!value)
1529 {
1530 target_flags &= ~MASK_SSSE3;
1531 target_flags_explicit |= MASK_SSSE3;
1682 target_flags &= ~(MASK_SSSE3 | MASK_SSE4A);
1683 target_flags_explicit |= MASK_SSSE3 | MASK_SSE4A;
1532 }
1533 return true;
1534
1535 default:
1536 return true;
1537 }
1538}
1539

--- 35 unchanged lines hidden ---

1575 {&geode_cost, 0, 0, 0, 0, 0, 0, 0},
1576 {&k6_cost, 0, 0, 32, 7, 32, 7, 32},
1577 {&athlon_cost, 0, 0, 16, 7, 16, 7, 16},
1578 {&pentium4_cost, 0, 0, 0, 0, 0, 0, 0},
1579 {&k8_cost, 0, 0, 16, 7, 16, 7, 16},
1580 {&nocona_cost, 0, 0, 0, 0, 0, 0, 0},
1581 {&core2_cost, 0, 0, 16, 7, 16, 7, 16},
1582 {&generic32_cost, 0, 0, 16, 7, 16, 7, 16},
1583 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16}
1735 {&generic64_cost, 0, 0, 16, 7, 16, 7, 16},
1736 {&amdfam10_cost, 0, 0, 32, 24, 32, 7, 32}
1584 };
1585
1586 static const char * const cpu_names[] = TARGET_CPU_DEFAULT_NAMES;
1587 static struct pta
1588 {
1589 const char *const name; /* processor name or nickname. */
1590 const enum processor_type processor;
1591 const enum pta_flags
1592 {
1593 PTA_SSE = 1,
1594 PTA_SSE2 = 2,
1595 PTA_SSE3 = 4,
1596 PTA_MMX = 8,
1597 PTA_PREFETCH_SSE = 16,
1598 PTA_3DNOW = 32,
1599 PTA_3DNOW_A = 64,
1600 PTA_64BIT = 128,
1601 PTA_SSSE3 = 256
1754 PTA_SSSE3 = 256,
1755 PTA_CX16 = 512,
1756 PTA_POPCNT = 1024,
1757 PTA_ABM = 2048,
1758 PTA_SSE4A = 4096
1602 } flags;
1603 }
1604 const processor_alias_table[] =
1605 {
1606 {"i386", PROCESSOR_I386, 0},
1607 {"i486", PROCESSOR_I486, 0},
1608 {"i586", PROCESSOR_PENTIUM, 0},
1609 {"pentium", PROCESSOR_PENTIUM, 0},

--- 10 unchanged lines hidden ---

1620 {"pentium-m", PROCESSOR_PENTIUMPRO, PTA_MMX | PTA_SSE | PTA_PREFETCH_SSE | PTA_SSE2},
1621 {"pentium4", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1622 | PTA_MMX | PTA_PREFETCH_SSE},
1623 {"pentium4m", PROCESSOR_PENTIUM4, PTA_SSE | PTA_SSE2
1624 | PTA_MMX | PTA_PREFETCH_SSE},
1625 {"prescott", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3
1626 | PTA_MMX | PTA_PREFETCH_SSE},
1627 {"nocona", PROCESSOR_NOCONA, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_64BIT
1628 | PTA_MMX | PTA_PREFETCH_SSE},
1785 | PTA_MMX | PTA_PREFETCH_SSE | PTA_CX16},
1629 {"core2", PROCESSOR_CORE2, PTA_SSE | PTA_SSE2 | PTA_SSE3 | PTA_SSSE3
1630 | PTA_64BIT | PTA_MMX
1631 | PTA_PREFETCH_SSE},
1788 | PTA_PREFETCH_SSE | PTA_CX16},
1632 {"geode", PROCESSOR_GEODE, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1633 | PTA_3DNOW_A},
1634 {"k6", PROCESSOR_K6, PTA_MMX},
1635 {"k6-2", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1636 {"k6-3", PROCESSOR_K6, PTA_MMX | PTA_3DNOW},
1637 {"athlon", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1638 | PTA_3DNOW_A},
1639 {"athlon-tbird", PROCESSOR_ATHLON, PTA_MMX | PTA_PREFETCH_SSE

--- 18 unchanged lines hidden ---

1658 | PTA_SSE3 },
1659 {"athlon64", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1660 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1661 {"athlon64-sse3", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1662 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2
1663 | PTA_SSE3 },
1664 {"athlon-fx", PROCESSOR_K8, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW | PTA_64BIT
1665 | PTA_3DNOW_A | PTA_SSE | PTA_SSE2},
1823 {"amdfam10", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1824 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1825 | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
1826 | PTA_ABM | PTA_SSE4A | PTA_CX16},
1827 {"barcelona", PROCESSOR_AMDFAM10, PTA_MMX | PTA_PREFETCH_SSE | PTA_3DNOW
1828 | PTA_64BIT | PTA_3DNOW_A | PTA_SSE
1829 | PTA_SSE2 | PTA_SSE3 | PTA_POPCNT
1830 | PTA_ABM | PTA_SSE4A | PTA_CX16},
1666 {"generic32", PROCESSOR_GENERIC32, 0 /* flags are only used for -march switch. */ },
1667 {"generic64", PROCESSOR_GENERIC64, PTA_64BIT /* flags are only used for -march switch. */ },
1668 };
1669
1670 int const pta_size = ARRAY_SIZE (processor_alias_table);
1671
1672#ifdef SUBTARGET_OVERRIDE_OPTIONS
1673 SUBTARGET_OVERRIDE_OPTIONS;

--- 146 unchanged lines hidden ---

1820 if (processor_alias_table[i].flags & PTA_SSE3
1821 && !(target_flags_explicit & MASK_SSE3))
1822 target_flags |= MASK_SSE3;
1823 if (processor_alias_table[i].flags & PTA_SSSE3
1824 && !(target_flags_explicit & MASK_SSSE3))
1825 target_flags |= MASK_SSSE3;
1826 if (processor_alias_table[i].flags & PTA_PREFETCH_SSE)
1827 x86_prefetch_sse = true;
1993 if (processor_alias_table[i].flags & PTA_CX16)
1994 x86_cmpxchg16b = true;
1995 if (processor_alias_table[i].flags & PTA_POPCNT
1996 && !(target_flags_explicit & MASK_POPCNT))
1997 target_flags |= MASK_POPCNT;
1998 if (processor_alias_table[i].flags & PTA_ABM
1999 && !(target_flags_explicit & MASK_ABM))
2000 target_flags |= MASK_ABM;
2001 if (processor_alias_table[i].flags & PTA_SSE4A
2002 && !(target_flags_explicit & MASK_SSE4A))
2003 target_flags |= MASK_SSE4A;
1828 if (TARGET_64BIT && !(processor_alias_table[i].flags & PTA_64BIT))
1829 error ("CPU you selected does not support x86-64 "
1830 "instruction set");
1831 break;
1832 }
1833
1834 if (i == pta_size)
1835 error ("bad value (%s) for -march= switch", ix86_arch_string);

--- 162 unchanged lines hidden ---

1998 software floating point, don't use 387 inline intrinsics. */
1999 if (!TARGET_80387)
2000 target_flags |= MASK_NO_FANCY_MATH_387;
2001
2002 /* Turn on SSE3 builtins for -mssse3. */
2003 if (TARGET_SSSE3)
2004 target_flags |= MASK_SSE3;
2005
2182 /* Turn on SSE3 builtins for -msse4a. */
2183 if (TARGET_SSE4A)
2184 target_flags |= MASK_SSE3;
2185
2006 /* Turn on SSE2 builtins for -msse3. */
2007 if (TARGET_SSE3)
2008 target_flags |= MASK_SSE2;
2009
2010 /* Turn on SSE builtins for -msse2. */
2011 if (TARGET_SSE2)
2012 target_flags |= MASK_SSE;
2013
2014 /* Turn on MMX builtins for -msse. */
2015 if (TARGET_SSE)
2016 {
2017 target_flags |= MASK_MMX & ~target_flags_explicit;
2018 x86_prefetch_sse = true;
2019 }
2020
2021 /* Turn on MMX builtins for 3Dnow. */
2022 if (TARGET_3DNOW)
2023 target_flags |= MASK_MMX;
2024
2205 /* Turn on POPCNT builtins for -mabm. */
2206 if (TARGET_ABM)
2207 target_flags |= MASK_POPCNT;
2208
2025 if (TARGET_64BIT)
2026 {
2027 if (TARGET_ALIGN_DOUBLE)
2028 error ("-malign-double makes no sense in the 64bit mode");
2029 if (TARGET_RTD)
2030 error ("-mrtd calling convention not supported in the 64bit mode");
2031
2032 /* Enable by default the SSE and MMX builtins. Do allow the user to

--- 7209 unchanged lines hidden ---

9242 {
9243 op0 = gen_lowpart (V16QImode, op0);
9244 op1 = gen_lowpart (V16QImode, op1);
9245 emit_insn (gen_sse2_movdqu (op0, op1));
9246 return;
9247 }
9248
9249 if (TARGET_SSE2 && mode == V2DFmode)
9250 {
9251 rtx zero;
9252
9437 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
9438 {
9439 op0 = gen_lowpart (V2DFmode, op0);
9440 op1 = gen_lowpart (V2DFmode, op1);
9441 emit_insn (gen_sse2_movupd (op0, op1));
9442 return;
9443 }
9444
9253 /* When SSE registers are split into halves, we can avoid
9254 writing to the top half twice. */
9255 if (TARGET_SSE_SPLIT_REGS)
9256 {
9257 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9258 zero = op0;
9259 }
9260 else

--- 10 unchanged lines hidden (view full) ---

9271 }
9272
9273 m = adjust_address (op1, DFmode, 0);
9274 emit_insn (gen_sse2_loadlpd (op0, zero, m));
9275 m = adjust_address (op1, DFmode, 8);
9276 emit_insn (gen_sse2_loadhpd (op0, op0, m));
9277 }
9278 else
9279 {
9472 if (TARGET_SSE_UNALIGNED_MOVE_OPTIMAL)
9473 {
9474 op0 = gen_lowpart (V4SFmode, op0);
9475 op1 = gen_lowpart (V4SFmode, op1);
9476 emit_insn (gen_sse_movups (op0, op1));
9477 return;
9478 }
9479
9280 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
9281 emit_move_insn (op0, CONST0_RTX (mode));
9282 else
9283 emit_insn (gen_rtx_CLOBBER (VOIDmode, op0));
9284
9285 if (mode != V4SFmode)
9286 op0 = gen_lowpart (V4SFmode, op0);
9287 m = adjust_address (op1, V2SFmode, 0);

--- 4540 unchanged lines hidden ---

13828 case PROCESSOR_PENTIUM:
13829 case PROCESSOR_K6:
13830 return 2;
13831
13832 case PROCESSOR_PENTIUMPRO:
13833 case PROCESSOR_PENTIUM4:
13834 case PROCESSOR_ATHLON:
13835 case PROCESSOR_K8:
14036 case PROCESSOR_AMDFAM10:
13836 case PROCESSOR_NOCONA:
13837 case PROCESSOR_GENERIC32:
13838 case PROCESSOR_GENERIC64:
13839 return 3;
13840
13841 case PROCESSOR_CORE2:
13842 return 4;
13843

--- 182 unchanged lines hidden ---

14026 cost -= 2;
14027 else
14028 cost = 1;
14029 }
14030 break;
14031
14032 case PROCESSOR_ATHLON:
14033 case PROCESSOR_K8:
14235 case PROCESSOR_AMDFAM10:
14034 case PROCESSOR_GENERIC32:
14035 case PROCESSOR_GENERIC64:
14036 memory = get_attr_memory (insn);
14037
14038 /* Show ability of reorder buffer to hide latency of load by executing
14039 in parallel with previous instruction in case
14040 previous instruction is not needed to compute the address. */
14041 if ((memory == MEMORY_LOAD || memory == MEMORY_BOTH)

--- 697 unchanged lines hidden ---

14739 IX86_BUILTIN_PSIGNB128,
14740 IX86_BUILTIN_PSIGNW128,
14741 IX86_BUILTIN_PSIGND128,
14742 IX86_BUILTIN_PALIGNR128,
14743 IX86_BUILTIN_PABSB128,
14744 IX86_BUILTIN_PABSW128,
14745 IX86_BUILTIN_PABSD128,
14746
14949 /* AMDFAM10 - SSE4A New Instructions. */
14950 IX86_BUILTIN_MOVNTSD,
14951 IX86_BUILTIN_MOVNTSS,
14952 IX86_BUILTIN_EXTRQI,
14953 IX86_BUILTIN_EXTRQ,
14954 IX86_BUILTIN_INSERTQI,
14955 IX86_BUILTIN_INSERTQ,
14956
14747 IX86_BUILTIN_VEC_INIT_V2SI,
14748 IX86_BUILTIN_VEC_INIT_V4HI,
14749 IX86_BUILTIN_VEC_INIT_V8QI,
14750 IX86_BUILTIN_VEC_EXT_V2DF,
14751 IX86_BUILTIN_VEC_EXT_V2DI,
14752 IX86_BUILTIN_VEC_EXT_V4SF,
14753 IX86_BUILTIN_VEC_EXT_V4SI,
14754 IX86_BUILTIN_VEC_EXT_V8HI,

--- 708 unchanged lines hidden ---

15463 tree int_ftype_v16qi
15464 = build_function_type_list (integer_type_node, V16QI_type_node, NULL_TREE);
15465 tree v16qi_ftype_pcchar
15466 = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
15467 tree void_ftype_pchar_v16qi
15468 = build_function_type_list (void_type_node,
15469 pchar_type_node, V16QI_type_node, NULL_TREE);
15470
15681 tree v2di_ftype_v2di_unsigned_unsigned
15682 = build_function_type_list (V2DI_type_node, V2DI_type_node,
15683 unsigned_type_node, unsigned_type_node,
15684 NULL_TREE);
15685 tree v2di_ftype_v2di_v2di_unsigned_unsigned
15686 = build_function_type_list (V2DI_type_node, V2DI_type_node, V2DI_type_node,
15687 unsigned_type_node, unsigned_type_node,
15688 NULL_TREE);
15689 tree v2di_ftype_v2di_v16qi
15690 = build_function_type_list (V2DI_type_node, V2DI_type_node, V16QI_type_node,
15691 NULL_TREE);
15692
15471 tree float80_type;
15472 tree float128_type;
15473 tree ftype;
15474
15475 /* The __float80 type. */
15476 if (TYPE_MODE (long_double_type_node) == XFmode)
15477 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
15478 "__float80");

--- 320 unchanged lines hidden ---

15799 v16qi_ftype_pcchar, IX86_BUILTIN_LDDQU);
15800
15801 /* SSSE3. */
15802 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr128",
15803 v2di_ftype_v2di_v2di_int, IX86_BUILTIN_PALIGNR128);
15804 def_builtin (MASK_SSSE3, "__builtin_ia32_palignr", di_ftype_di_di_int,
15805 IX86_BUILTIN_PALIGNR);
15806
16029 /* AMDFAM10 SSE4A New built-ins */
16030 def_builtin (MASK_SSE4A, "__builtin_ia32_movntsd",
16031 void_ftype_pdouble_v2df, IX86_BUILTIN_MOVNTSD);
16032 def_builtin (MASK_SSE4A, "__builtin_ia32_movntss",
16033 void_ftype_pfloat_v4sf, IX86_BUILTIN_MOVNTSS);
16034 def_builtin (MASK_SSE4A, "__builtin_ia32_extrqi",
16035 v2di_ftype_v2di_unsigned_unsigned, IX86_BUILTIN_EXTRQI);
16036 def_builtin (MASK_SSE4A, "__builtin_ia32_extrq",
16037 v2di_ftype_v2di_v16qi, IX86_BUILTIN_EXTRQ);
16038 def_builtin (MASK_SSE4A, "__builtin_ia32_insertqi",
16039 v2di_ftype_v2di_v2di_unsigned_unsigned, IX86_BUILTIN_INSERTQI);
16040 def_builtin (MASK_SSE4A, "__builtin_ia32_insertq",
16041 v2di_ftype_v2di_v2di, IX86_BUILTIN_INSERTQ);
16042
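Illustration (not part of the commit): a hypothetical use of the SSE4A
builtins registered above. The vector typedefs are assumptions chosen to
match the v2df/v2di modes in the prototypes, and the immediate operands of
__builtin_ia32_extrqi must be compile-time constants, as the expander later
in this file enforces.

    typedef double v2df __attribute__ ((vector_size (16)));
    typedef long long v2di __attribute__ ((vector_size (16)));

    static void stream_low_double (double *dst, v2df x)
    {
      __builtin_ia32_movntsd (dst, x);  /* MOVNTSD: non-temporal scalar store */
    }

    static v2di extract_bits (v2di x)
    {
      /* EXTRQ, immediate form; the two immediates select the bit field.  */
      return __builtin_ia32_extrqi (x, 8, 16);
    }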
15807 /* Access to the vec_init patterns. */
15808 ftype = build_function_type_list (V2SI_type_node, integer_type_node,
15809 integer_type_node, NULL_TREE);
15810 def_builtin (MASK_MMX, "__builtin_ia32_vec_init_v2si",
15811 ftype, IX86_BUILTIN_VEC_INIT_V2SI);
15812
15813 ftype = build_function_type_list (V4HI_type_node, short_integer_type_node,
15814 short_integer_type_node,

--- 480 unchanged lines hidden ---

16295 enum machine_mode mode ATTRIBUTE_UNUSED,
16296 int ignore ATTRIBUTE_UNUSED)
16297{
16298 const struct builtin_description *d;
16299 size_t i;
16300 enum insn_code icode;
16301 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
16302 tree arglist = TREE_OPERAND (exp, 1);
16303 tree arg0, arg1, arg2;
16304 rtx op0, op1, op2, pat;
16305 enum machine_mode tmode, mode0, mode1, mode2, mode3;
16539 tree arg0, arg1, arg2, arg3;
16540 rtx op0, op1, op2, op3, pat;
16541 enum machine_mode tmode, mode0, mode1, mode2, mode3, mode4;
16306 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
16307
16308 switch (fcode)
16309 {
16310 case IX86_BUILTIN_EMMS:
16311 emit_insn (gen_mmx_emms ());
16312 return 0;
16313

--- 499 unchanged lines hidden ---

16813 target = gen_reg_rtx (mode);
16814 pat = GEN_FCN (icode) (simplify_gen_subreg (tmode, target, mode, 0),
16815 op0, op1, op2);
16816 if (! pat)
16817 return 0;
16818 emit_insn (pat);
16819 return target;
16820
17057 case IX86_BUILTIN_MOVNTSD:
17058 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv2df, arglist);
17059
17060 case IX86_BUILTIN_MOVNTSS:
17061 return ix86_expand_store_builtin (CODE_FOR_sse4a_vmmovntv4sf, arglist);
17062
17063 case IX86_BUILTIN_INSERTQ:
17064 case IX86_BUILTIN_EXTRQ:
17065 icode = (fcode == IX86_BUILTIN_EXTRQ
17066 ? CODE_FOR_sse4a_extrq
17067 : CODE_FOR_sse4a_insertq);
17068 arg0 = TREE_VALUE (arglist);
17069 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17070 op0 = expand_normal (arg0);
17071 op1 = expand_normal (arg1);
17072 tmode = insn_data[icode].operand[0].mode;
17073 mode1 = insn_data[icode].operand[1].mode;
17074 mode2 = insn_data[icode].operand[2].mode;
17075 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17076 op0 = copy_to_mode_reg (mode1, op0);
17077 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17078 op1 = copy_to_mode_reg (mode2, op1);
17079 if (optimize || target == 0
17080 || GET_MODE (target) != tmode
17081 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17082 target = gen_reg_rtx (tmode);
17083 pat = GEN_FCN (icode) (target, op0, op1);
17084 if (! pat)
17085 return NULL_RTX;
17086 emit_insn (pat);
17087 return target;
17088
17089 case IX86_BUILTIN_EXTRQI:
17090 icode = CODE_FOR_sse4a_extrqi;
17091 arg0 = TREE_VALUE (arglist);
17092 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17093 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17094 op0 = expand_normal (arg0);
17095 op1 = expand_normal (arg1);
17096 op2 = expand_normal (arg2);
17097 tmode = insn_data[icode].operand[0].mode;
17098 mode1 = insn_data[icode].operand[1].mode;
17099 mode2 = insn_data[icode].operand[2].mode;
17100 mode3 = insn_data[icode].operand[3].mode;
17101 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17102 op0 = copy_to_mode_reg (mode1, op0);
17103 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17104 {
17105 error ("index mask must be an immediate");
17106 return gen_reg_rtx (tmode);
17107 }
17108 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17109 {
17110 error ("length mask must be an immediate");
17111 return gen_reg_rtx (tmode);
17112 }
17113 if (optimize || target == 0
17114 || GET_MODE (target) != tmode
17115 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17116 target = gen_reg_rtx (tmode);
17117 pat = GEN_FCN (icode) (target, op0, op1, op2);
17118 if (! pat)
17119 return NULL_RTX;
17120 emit_insn (pat);
17121 return target;
17122
17123 case IX86_BUILTIN_INSERTQI:
17124 icode = CODE_FOR_sse4a_insertqi;
17125 arg0 = TREE_VALUE (arglist);
17126 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
17127 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
17128 arg3 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))));
17129 op0 = expand_normal (arg0);
17130 op1 = expand_normal (arg1);
17131 op2 = expand_normal (arg2);
17132 op3 = expand_normal (arg3);
17133 tmode = insn_data[icode].operand[0].mode;
17134 mode1 = insn_data[icode].operand[1].mode;
17135 mode2 = insn_data[icode].operand[2].mode;
17136 mode3 = insn_data[icode].operand[3].mode;
17137 mode4 = insn_data[icode].operand[4].mode;
17138
17139 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
17140 op0 = copy_to_mode_reg (mode1, op0);
17141
17142 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
17143 op1 = copy_to_mode_reg (mode2, op1);
17144
17145 if (! (*insn_data[icode].operand[3].predicate) (op2, mode3))
17146 {
17147 error ("index mask must be an immediate");
17148 return gen_reg_rtx (tmode);
17149 }
17150 if (! (*insn_data[icode].operand[4].predicate) (op3, mode4))
17151 {
17152 error ("length mask must be an immediate");
17153 return gen_reg_rtx (tmode);
17154 }
17155 if (optimize || target == 0
17156 || GET_MODE (target) != tmode
17157 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
17158 target = gen_reg_rtx (tmode);
17159 pat = GEN_FCN (icode) (target, op0, op1, op2, op3);
17160 if (! pat)
17161 return NULL_RTX;
17162 emit_insn (pat);
17163 return target;
17164
16821 case IX86_BUILTIN_VEC_INIT_V2SI:
16822 case IX86_BUILTIN_VEC_INIT_V4HI:
16823 case IX86_BUILTIN_VEC_INIT_V8QI:
16824 return ix86_expand_vec_init_builtin (TREE_TYPE (exp), arglist, target);
16825
16826 case IX86_BUILTIN_VEC_EXT_V2DF:
16827 case IX86_BUILTIN_VEC_EXT_V2DI:
16828 case IX86_BUILTIN_VEC_EXT_V4SF:

--- 2671 unchanged lines hidden ---