TargetLoweringBase.cpp revision 302408
//===-- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cctype>
using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

/// InitLibcallNames - Set default libcall names.
///
static void InitLibcallNames(const char **Names, const Triple &TT) {
  Names[RTLIB::SHL_I16] = "__ashlhi3";
  Names[RTLIB::SHL_I32] = "__ashlsi3";
  Names[RTLIB::SHL_I64] = "__ashldi3";
  Names[RTLIB::SHL_I128] = "__ashlti3";
  Names[RTLIB::SRL_I16] = "__lshrhi3";
  Names[RTLIB::SRL_I32] = "__lshrsi3";
  Names[RTLIB::SRL_I64] = "__lshrdi3";
  Names[RTLIB::SRL_I128] = "__lshrti3";
  Names[RTLIB::SRA_I16] = "__ashrhi3";
  Names[RTLIB::SRA_I32] = "__ashrsi3";
  Names[RTLIB::SRA_I64] = "__ashrdi3";
  Names[RTLIB::SRA_I128] = "__ashrti3";
  Names[RTLIB::MUL_I8] = "__mulqi3";
  Names[RTLIB::MUL_I16] = "__mulhi3";
  Names[RTLIB::MUL_I32] = "__mulsi3";
  Names[RTLIB::MUL_I64] = "__muldi3";
  Names[RTLIB::MUL_I128] = "__multi3";
  Names[RTLIB::MULO_I32] = "__mulosi4";
  Names[RTLIB::MULO_I64] = "__mulodi4";
  Names[RTLIB::MULO_I128] = "__muloti4";
  Names[RTLIB::SDIV_I8] = "__divqi3";
  Names[RTLIB::SDIV_I16] = "__divhi3";
  Names[RTLIB::SDIV_I32] = "__divsi3";
  Names[RTLIB::SDIV_I64] = "__divdi3";
  Names[RTLIB::SDIV_I128] = "__divti3";
  Names[RTLIB::UDIV_I8] = "__udivqi3";
  Names[RTLIB::UDIV_I16] = "__udivhi3";
  Names[RTLIB::UDIV_I32] = "__udivsi3";
  Names[RTLIB::UDIV_I64] = "__udivdi3";
  Names[RTLIB::UDIV_I128] = "__udivti3";
  Names[RTLIB::SREM_I8] = "__modqi3";
  Names[RTLIB::SREM_I16] = "__modhi3";
  Names[RTLIB::SREM_I32] = "__modsi3";
  Names[RTLIB::SREM_I64] = "__moddi3";
  Names[RTLIB::SREM_I128] = "__modti3";
  Names[RTLIB::UREM_I8] = "__umodqi3";
  Names[RTLIB::UREM_I16] = "__umodhi3";
  Names[RTLIB::UREM_I32] = "__umodsi3";
  Names[RTLIB::UREM_I64] = "__umoddi3";
  Names[RTLIB::UREM_I128] = "__umodti3";

  // These are generally not available.
  Names[RTLIB::SDIVREM_I8] = nullptr;
  Names[RTLIB::SDIVREM_I16] = nullptr;
  Names[RTLIB::SDIVREM_I32] = nullptr;
  Names[RTLIB::SDIVREM_I64] = nullptr;
  Names[RTLIB::SDIVREM_I128] = nullptr;
  Names[RTLIB::UDIVREM_I8] = nullptr;
  Names[RTLIB::UDIVREM_I16] = nullptr;
  Names[RTLIB::UDIVREM_I32] = nullptr;
  Names[RTLIB::UDIVREM_I64] = nullptr;
  Names[RTLIB::UDIVREM_I128] = nullptr;

  Names[RTLIB::NEG_I32] = "__negsi2";
  Names[RTLIB::NEG_I64] = "__negdi2";
  Names[RTLIB::ADD_F32] = "__addsf3";
  Names[RTLIB::ADD_F64] = "__adddf3";
  Names[RTLIB::ADD_F80] = "__addxf3";
  Names[RTLIB::ADD_F128] = "__addtf3";
  Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
  Names[RTLIB::SUB_F32] = "__subsf3";
  Names[RTLIB::SUB_F64] = "__subdf3";
  Names[RTLIB::SUB_F80] = "__subxf3";
  Names[RTLIB::SUB_F128] = "__subtf3";
  Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
  Names[RTLIB::MUL_F32] = "__mulsf3";
  Names[RTLIB::MUL_F64] = "__muldf3";
  Names[RTLIB::MUL_F80] = "__mulxf3";
  Names[RTLIB::MUL_F128] = "__multf3";
  Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
  Names[RTLIB::DIV_F32] = "__divsf3";
  Names[RTLIB::DIV_F64] = "__divdf3";
  Names[RTLIB::DIV_F80] = "__divxf3";
  Names[RTLIB::DIV_F128] = "__divtf3";
  Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
  Names[RTLIB::REM_F32] = "fmodf";
  Names[RTLIB::REM_F64] = "fmod";
  Names[RTLIB::REM_F80] = "fmodl";
  Names[RTLIB::REM_F128] = "fmodl";
  Names[RTLIB::REM_PPCF128] = "fmodl";
  Names[RTLIB::FMA_F32] = "fmaf";
  Names[RTLIB::FMA_F64] = "fma";
  Names[RTLIB::FMA_F80] = "fmal";
  Names[RTLIB::FMA_F128] = "fmal";
  Names[RTLIB::FMA_PPCF128] = "fmal";
  Names[RTLIB::POWI_F32] = "__powisf2";
  Names[RTLIB::POWI_F64] = "__powidf2";
  Names[RTLIB::POWI_F80] = "__powixf2";
  Names[RTLIB::POWI_F128] = "__powitf2";
  Names[RTLIB::POWI_PPCF128] = "__powitf2";
  Names[RTLIB::SQRT_F32] = "sqrtf";
  Names[RTLIB::SQRT_F64] = "sqrt";
  Names[RTLIB::SQRT_F80] = "sqrtl";
  Names[RTLIB::SQRT_F128] = "sqrtl";
  Names[RTLIB::SQRT_PPCF128] = "sqrtl";
  Names[RTLIB::LOG_F32] = "logf";
  Names[RTLIB::LOG_F64] = "log";
  Names[RTLIB::LOG_F80] = "logl";
  Names[RTLIB::LOG_F128] = "logl";
  Names[RTLIB::LOG_PPCF128] = "logl";
  Names[RTLIB::LOG2_F32] = "log2f";
  Names[RTLIB::LOG2_F64] = "log2";
  Names[RTLIB::LOG2_F80] = "log2l";
  Names[RTLIB::LOG2_F128] = "log2l";
  Names[RTLIB::LOG2_PPCF128] = "log2l";
  Names[RTLIB::LOG10_F32] = "log10f";
  Names[RTLIB::LOG10_F64] = "log10";
  Names[RTLIB::LOG10_F80] = "log10l";
  Names[RTLIB::LOG10_F128] = "log10l";
  Names[RTLIB::LOG10_PPCF128] = "log10l";
  Names[RTLIB::EXP_F32] = "expf";
  Names[RTLIB::EXP_F64] = "exp";
  Names[RTLIB::EXP_F80] = "expl";
  Names[RTLIB::EXP_F128] = "expl";
  Names[RTLIB::EXP_PPCF128] = "expl";
  Names[RTLIB::EXP2_F32] = "exp2f";
  Names[RTLIB::EXP2_F64] = "exp2";
  Names[RTLIB::EXP2_F80] = "exp2l";
  Names[RTLIB::EXP2_F128] = "exp2l";
  Names[RTLIB::EXP2_PPCF128] = "exp2l";
  Names[RTLIB::SIN_F32] = "sinf";
  Names[RTLIB::SIN_F64] = "sin";
  Names[RTLIB::SIN_F80] = "sinl";
  Names[RTLIB::SIN_F128] = "sinl";
  Names[RTLIB::SIN_PPCF128] = "sinl";
  Names[RTLIB::COS_F32] = "cosf";
  Names[RTLIB::COS_F64] = "cos";
  Names[RTLIB::COS_F80] = "cosl";
  Names[RTLIB::COS_F128] = "cosl";
  Names[RTLIB::COS_PPCF128] = "cosl";
  Names[RTLIB::POW_F32] = "powf";
  Names[RTLIB::POW_F64] = "pow";
  Names[RTLIB::POW_F80] = "powl";
  Names[RTLIB::POW_F128] = "powl";
  Names[RTLIB::POW_PPCF128] = "powl";
  Names[RTLIB::CEIL_F32] = "ceilf";
  Names[RTLIB::CEIL_F64] = "ceil";
  Names[RTLIB::CEIL_F80] = "ceill";
  Names[RTLIB::CEIL_F128] = "ceill";
  Names[RTLIB::CEIL_PPCF128] = "ceill";
  Names[RTLIB::TRUNC_F32] = "truncf";
  Names[RTLIB::TRUNC_F64] = "trunc";
  Names[RTLIB::TRUNC_F80] = "truncl";
  Names[RTLIB::TRUNC_F128] = "truncl";
  Names[RTLIB::TRUNC_PPCF128] = "truncl";
  Names[RTLIB::RINT_F32] = "rintf";
  Names[RTLIB::RINT_F64] = "rint";
  Names[RTLIB::RINT_F80] = "rintl";
  Names[RTLIB::RINT_F128] = "rintl";
  Names[RTLIB::RINT_PPCF128] = "rintl";
  Names[RTLIB::NEARBYINT_F32] = "nearbyintf";
  Names[RTLIB::NEARBYINT_F64] = "nearbyint";
  Names[RTLIB::NEARBYINT_F80] = "nearbyintl";
  Names[RTLIB::NEARBYINT_F128] = "nearbyintl";
  Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl";
  Names[RTLIB::ROUND_F32] = "roundf";
  Names[RTLIB::ROUND_F64] = "round";
  Names[RTLIB::ROUND_F80] = "roundl";
  Names[RTLIB::ROUND_F128] = "roundl";
  Names[RTLIB::ROUND_PPCF128] = "roundl";
  Names[RTLIB::FLOOR_F32] = "floorf";
  Names[RTLIB::FLOOR_F64] = "floor";
  Names[RTLIB::FLOOR_F80] = "floorl";
  Names[RTLIB::FLOOR_F128] = "floorl";
  Names[RTLIB::FLOOR_PPCF128] = "floorl";
  Names[RTLIB::FMIN_F32] = "fminf";
  Names[RTLIB::FMIN_F64] = "fmin";
  Names[RTLIB::FMIN_F80] = "fminl";
  Names[RTLIB::FMIN_F128] = "fminl";
  Names[RTLIB::FMIN_PPCF128] = "fminl";
  Names[RTLIB::FMAX_F32] = "fmaxf";
  Names[RTLIB::FMAX_F64] = "fmax";
  Names[RTLIB::FMAX_F80] = "fmaxl";
  Names[RTLIB::FMAX_F128] = "fmaxl";
  Names[RTLIB::FMAX_PPCF128] = "fmaxl";
  Names[RTLIB::COPYSIGN_F32] = "copysignf";
  Names[RTLIB::COPYSIGN_F64] = "copysign";
  Names[RTLIB::COPYSIGN_F80] = "copysignl";
  Names[RTLIB::COPYSIGN_F128] = "copysignl";
  Names[RTLIB::COPYSIGN_PPCF128] = "copysignl";
  Names[RTLIB::FPEXT_F64_F128] = "__extenddftf2";
  Names[RTLIB::FPEXT_F32_F128] = "__extendsftf2";
  Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
  Names[RTLIB::FPEXT_F16_F32] = "__gnu_h2f_ieee";
  Names[RTLIB::FPROUND_F32_F16] = "__gnu_f2h_ieee";
  Names[RTLIB::FPROUND_F64_F16] = "__truncdfhf2";
  Names[RTLIB::FPROUND_F80_F16] = "__truncxfhf2";
  Names[RTLIB::FPROUND_F128_F16] = "__trunctfhf2";
  Names[RTLIB::FPROUND_PPCF128_F16] = "__trunctfhf2";
  Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
  Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2";
  Names[RTLIB::FPROUND_F128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
  Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
  Names[RTLIB::FPROUND_F128_F64] = "__trunctfdf2";
  Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
  Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
  Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
  Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
  Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi";
  Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi";
  Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti";
  Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi";
  Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi";
  Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti";
  Names[RTLIB::FPTOSINT_F128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_F128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_F128_I128] = "__fixtfti";
  Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
  Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
  Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
  Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
  Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
  Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
  Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi";
  Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi";
  Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti";
  Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi";
  Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi";
  Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti";
  Names[RTLIB::FPTOUINT_F128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_F128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_F128_I128] = "__fixunstfti";
  Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi";
  Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi";
  Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti";
  Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf";
  Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf";
  Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf";
  Names[RTLIB::SINTTOFP_I32_F128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf";
  Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf";
  Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf";
  Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf";
  Names[RTLIB::SINTTOFP_I64_F128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf";
  Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf";
  Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf";
  Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf";
  Names[RTLIB::SINTTOFP_I128_F128] = "__floattitf";
  Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf";
  Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf";
  Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf";
  Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf";
  Names[RTLIB::UINTTOFP_I32_F128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf";
  Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf";
  Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf";
  Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf";
  Names[RTLIB::UINTTOFP_I64_F128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf";
  Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf";
  Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf";
  Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf";
  Names[RTLIB::UINTTOFP_I128_F128] = "__floatuntitf";
  Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf";
  Names[RTLIB::OEQ_F32] = "__eqsf2";
  Names[RTLIB::OEQ_F64] = "__eqdf2";
  Names[RTLIB::OEQ_F128] = "__eqtf2";
  Names[RTLIB::UNE_F32] = "__nesf2";
  Names[RTLIB::UNE_F64] = "__nedf2";
  Names[RTLIB::UNE_F128] = "__netf2";
  Names[RTLIB::OGE_F32] = "__gesf2";
  Names[RTLIB::OGE_F64] = "__gedf2";
  Names[RTLIB::OGE_F128] = "__getf2";
  Names[RTLIB::OLT_F32] = "__ltsf2";
  Names[RTLIB::OLT_F64] = "__ltdf2";
  Names[RTLIB::OLT_F128] = "__lttf2";
  Names[RTLIB::OLE_F32] = "__lesf2";
  Names[RTLIB::OLE_F64] = "__ledf2";
  Names[RTLIB::OLE_F128] = "__letf2";
  Names[RTLIB::OGT_F32] = "__gtsf2";
  Names[RTLIB::OGT_F64] = "__gtdf2";
  Names[RTLIB::OGT_F128] = "__gttf2";
  Names[RTLIB::UO_F32] = "__unordsf2";
  Names[RTLIB::UO_F64] = "__unorddf2";
  Names[RTLIB::UO_F128] = "__unordtf2";
  Names[RTLIB::O_F32] = "__unordsf2";
  Names[RTLIB::O_F64] = "__unorddf2";
  Names[RTLIB::O_F128] = "__unordtf2";
  Names[RTLIB::MEMCPY] = "memcpy";
  Names[RTLIB::MEMMOVE] = "memmove";
  Names[RTLIB::MEMSET] = "memset";
  Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4] = "__sync_val_compare_and_swap_4";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8] = "__sync_val_compare_and_swap_8";
  Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_16] = "__sync_val_compare_and_swap_16";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_1] = "__sync_lock_test_and_set_1";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_2] = "__sync_lock_test_and_set_2";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_4] = "__sync_lock_test_and_set_4";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_8] = "__sync_lock_test_and_set_8";
  Names[RTLIB::SYNC_LOCK_TEST_AND_SET_16] = "__sync_lock_test_and_set_16";
  Names[RTLIB::SYNC_FETCH_AND_ADD_1] = "__sync_fetch_and_add_1";
  Names[RTLIB::SYNC_FETCH_AND_ADD_2] = "__sync_fetch_and_add_2";
  Names[RTLIB::SYNC_FETCH_AND_ADD_4] = "__sync_fetch_and_add_4";
  Names[RTLIB::SYNC_FETCH_AND_ADD_8] = "__sync_fetch_and_add_8";
  Names[RTLIB::SYNC_FETCH_AND_ADD_16] = "__sync_fetch_and_add_16";
  Names[RTLIB::SYNC_FETCH_AND_SUB_1] = "__sync_fetch_and_sub_1";
  Names[RTLIB::SYNC_FETCH_AND_SUB_2] = "__sync_fetch_and_sub_2";
  Names[RTLIB::SYNC_FETCH_AND_SUB_4] = "__sync_fetch_and_sub_4";
  Names[RTLIB::SYNC_FETCH_AND_SUB_8] = "__sync_fetch_and_sub_8";
  Names[RTLIB::SYNC_FETCH_AND_SUB_16] = "__sync_fetch_and_sub_16";
  Names[RTLIB::SYNC_FETCH_AND_AND_1] = "__sync_fetch_and_and_1";
  Names[RTLIB::SYNC_FETCH_AND_AND_2] = "__sync_fetch_and_and_2";
  Names[RTLIB::SYNC_FETCH_AND_AND_4] = "__sync_fetch_and_and_4";
  Names[RTLIB::SYNC_FETCH_AND_AND_8] = "__sync_fetch_and_and_8";
  Names[RTLIB::SYNC_FETCH_AND_AND_16] = "__sync_fetch_and_and_16";
  Names[RTLIB::SYNC_FETCH_AND_OR_1] = "__sync_fetch_and_or_1";
  Names[RTLIB::SYNC_FETCH_AND_OR_2] = "__sync_fetch_and_or_2";
  Names[RTLIB::SYNC_FETCH_AND_OR_4] = "__sync_fetch_and_or_4";
  Names[RTLIB::SYNC_FETCH_AND_OR_8] = "__sync_fetch_and_or_8";
  Names[RTLIB::SYNC_FETCH_AND_OR_16] = "__sync_fetch_and_or_16";
  Names[RTLIB::SYNC_FETCH_AND_XOR_1] = "__sync_fetch_and_xor_1";
  Names[RTLIB::SYNC_FETCH_AND_XOR_2] = "__sync_fetch_and_xor_2";
  Names[RTLIB::SYNC_FETCH_AND_XOR_4] = "__sync_fetch_and_xor_4";
  Names[RTLIB::SYNC_FETCH_AND_XOR_8] = "__sync_fetch_and_xor_8";
  Names[RTLIB::SYNC_FETCH_AND_XOR_16] = "__sync_fetch_and_xor_16";
  Names[RTLIB::SYNC_FETCH_AND_NAND_1] = "__sync_fetch_and_nand_1";
  Names[RTLIB::SYNC_FETCH_AND_NAND_2] = "__sync_fetch_and_nand_2";
  Names[RTLIB::SYNC_FETCH_AND_NAND_4] = "__sync_fetch_and_nand_4";
  Names[RTLIB::SYNC_FETCH_AND_NAND_8] = "__sync_fetch_and_nand_8";
  Names[RTLIB::SYNC_FETCH_AND_NAND_16] = "__sync_fetch_and_nand_16";
  Names[RTLIB::SYNC_FETCH_AND_MAX_1] = "__sync_fetch_and_max_1";
  Names[RTLIB::SYNC_FETCH_AND_MAX_2] = "__sync_fetch_and_max_2";
  Names[RTLIB::SYNC_FETCH_AND_MAX_4] = "__sync_fetch_and_max_4";
  Names[RTLIB::SYNC_FETCH_AND_MAX_8] = "__sync_fetch_and_max_8";
  Names[RTLIB::SYNC_FETCH_AND_MAX_16] = "__sync_fetch_and_max_16";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_1] = "__sync_fetch_and_umax_1";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_2] = "__sync_fetch_and_umax_2";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_4] = "__sync_fetch_and_umax_4";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_8] = "__sync_fetch_and_umax_8";
  Names[RTLIB::SYNC_FETCH_AND_UMAX_16] = "__sync_fetch_and_umax_16";
  Names[RTLIB::SYNC_FETCH_AND_MIN_1] = "__sync_fetch_and_min_1";
  Names[RTLIB::SYNC_FETCH_AND_MIN_2] = "__sync_fetch_and_min_2";
  Names[RTLIB::SYNC_FETCH_AND_MIN_4] = "__sync_fetch_and_min_4";
  Names[RTLIB::SYNC_FETCH_AND_MIN_8] = "__sync_fetch_and_min_8";
  Names[RTLIB::SYNC_FETCH_AND_MIN_16] = "__sync_fetch_and_min_16";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_1] = "__sync_fetch_and_umin_1";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_2] = "__sync_fetch_and_umin_2";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_4] = "__sync_fetch_and_umin_4";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_8] = "__sync_fetch_and_umin_8";
  Names[RTLIB::SYNC_FETCH_AND_UMIN_16] = "__sync_fetch_and_umin_16";

  if (TT.getEnvironment() == Triple::GNU) {
    Names[RTLIB::SINCOS_F32] = "sincosf";
    Names[RTLIB::SINCOS_F64] = "sincos";
    Names[RTLIB::SINCOS_F80] = "sincosl";
    Names[RTLIB::SINCOS_F128] = "sincosl";
    Names[RTLIB::SINCOS_PPCF128] = "sincosl";
  } else {
    // These are generally not available.
    Names[RTLIB::SINCOS_F32] = nullptr;
    Names[RTLIB::SINCOS_F64] = nullptr;
    Names[RTLIB::SINCOS_F80] = nullptr;
    Names[RTLIB::SINCOS_F128] = nullptr;
    Names[RTLIB::SINCOS_PPCF128] = nullptr;
  }

  if (!TT.isOSOpenBSD()) {
    Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
  } else {
    // These are generally not available.
    Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
  }

  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
  // of the gnueabi-style __gnu_*_ieee.
  // FIXME: What about other targets?
  if (TT.isOSDarwin()) {
    Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
  }
}
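
// Example (illustrative sketch): initializing the table for a Darwin triple
// picks up the standard f16/f32 conversion names rather than the
// gnueabi-style ones set above.
//
//   const char *Names[RTLIB::UNKNOWN_LIBCALL];
//   InitLibcallNames(Names, Triple("x86_64-apple-darwin"));
//   // Names[RTLIB::FPEXT_F16_F32]   == "__extendhfsf2"
//   // Names[RTLIB::FPROUND_F32_F16] == "__truncsfhf2"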

/// InitLibcallCallingConvs - Set default libcall CallingConvs.
///
static void InitLibcallCallingConvs(CallingConv::ID *CCs) {
  for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) {
    CCs[i] = CallingConv::C;
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
  }

  return UNKNOWN_LIBCALL;
}
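
// For instance (illustrative):
//
//   RTLIB::getFPEXT(MVT::f32, MVT::f64) == RTLIB::FPEXT_F32_F64
//   RTLIB::getFPEXT(MVT::f16, MVT::f64) == RTLIB::UNKNOWN_LIBCALL
//
// The other getFP* / get*TOFP helpers below follow the same shape.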

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getATOMIC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum)                                              \
  case Name:                                                                   \
    switch (VT.SimpleTy) {                                                     \
    default:                                                                   \
      return UNKNOWN_LIBCALL;                                                  \
    case MVT::i8:                                                              \
      return Enum##_1;                                                         \
    case MVT::i16:                                                             \
      return Enum##_2;                                                         \
    case MVT::i32:                                                             \
      return Enum##_4;                                                         \
    case MVT::i64:                                                             \
      return Enum##_8;                                                         \
    case MVT::i128:                                                            \
      return Enum##_16;                                                        \
    }

  switch (Opc) {
    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
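
// Example (illustrative): an i32 atomic add maps to the 4-byte variant of the
// corresponding __sync_* routine.
//
//   RTLIB::Libcall LC = RTLIB::getATOMIC(ISD::ATOMIC_LOAD_ADD, MVT::i32);
//   // LC == RTLIB::SYNC_FETCH_AND_ADD_4, named "__sync_fetch_and_add_4".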

/// InitCmpLibcallCCs - Set default comparison libcall CC.
///
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::O_F32] = ISD::SETEQ;
  CCs[RTLIB::O_F64] = ISD::SETEQ;
  CCs[RTLIB::O_F128] = ISD::SETEQ;
}
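
// These condition codes describe how a soft-float comparison call's integer
// result is turned back into a boolean. For example (illustrative), __ltsf2
// returns a value less than zero when its first operand compares less than
// its second, so the call result is tested against zero with
// CCs[RTLIB::OLT_F32] == ISD::SETLT.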

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
    = MaxStoresPerMemmoveOptSize = 4;
  UseUnderscoreSetJmp = false;
  UseUnderscoreLongJmp = false;
  SelectIsExpensive = false;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  FsqrtIsCheap = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  MaskAndBranchFoldingIsLegal = false;
  EnableExtLdPromotion = false;
  HasFloatingPointExceptions = true;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  JumpBufSize = 0;
  JumpBufAlignment = 0;
  MinFunctionAlignment = 0;
  PrefFunctionAlignment = 0;
  PrefLoopAlignment = 0;
  GatherAllAliasesMaxDepth = 6;
  MinStackArgumentAlignment = 1;
  InsertFencesForAtomic = false;
  MinimumJumpTableEntries = 4;

  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
  InitLibcallCallingConvs(LibcallCallingConvs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  memset(RegClassForVT, 0, MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
  memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNAN, VT, Expand);
    setOperationAction(ISD::FMAXNAN, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);

    // Overflow operations default to expand.
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
    }

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FLOG ,      VT, Expand);
    setOperationAction(ISD::FLOG2,      VT, Expand);
    setOperationAction(ISD::FLOG10,     VT, Expand);
    setOperationAction(ISD::FEXP ,      VT, Expand);
    setOperationAction(ISD::FEXP2,      VT, Expand);
    setOperationAction(ISD::FFLOOR,     VT, Expand);
    setOperationAction(ISD::FMINNUM,    VT, Expand);
    setOperationAction(ISD::FMAXNUM,    VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL,      VT, Expand);
    setOperationAction(ISD::FRINT,      VT, Expand);
    setOperationAction(ISD::FTRUNC,     VT, Expand);
    setOperationAction(ISD::FROUND,     VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
}
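
// After initActions() runs (illustrative), a target that overrides nothing
// reports, e.g., getOperationAction(ISD::FROUND, MVT::f32) == Expand, while
// getOperationAction(ISD::ADD, MVT::i32) == Legal, since the zeroed action
// tables default every entry to Legal.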

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
                                         const DataLayout &DL) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return getScalarShiftAmountTy(DL, LHSTy);
}
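
// For example (illustrative), with 64-bit pointers the default scalar shift
// amount type computed above is i64, while a shift of a <4 x i32> vector
// reports <4 x i32> itself as the shift amount type.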

/// canOpTrap - Returns true if the operation can trap for the value type.
/// VT must be a legal type.
bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA,
                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
                                           SVT.getVectorNumElements() / 2));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts == 1)
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = (unsigned)NextPowerOf2(NumElts);
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    //  <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          EVT::getVectorVT(Context, EltVT, NumElts / 2));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fall back to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (1) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (1) {
    // Round up to the next power of 2.
    NumElts = (unsigned)NextPowerOf2(NumElts);

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type.  Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
  return LegalizeKind(TypeSplitVector, NVT);
}
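
// Example (illustrative): on a target whose legal integer types are i32 and
// i64, the extended scalar type i33 takes the power-of-two rounding path and
// is promoted to i64 in one step (TypePromoteInteger), while i128 is halved
// to i64 (TypeExpandInteger).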

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  unsigned NumElts = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, NumElts);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
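
// Example (illustrative): breaking down <8 x i32> on a target whose widest
// legal vector type is <4 x i32> halves the element count once, yielding
// NumIntermediates == 2, IntermediateVT == RegisterVT == <4 x i32>, and a
// return value of 2 registers.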

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
       I != E; ++I) {
    if (isTypeLegal(*I))
      return true;
  }
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock*
TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
                                   MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  // MI changes inside this loop as we grow operands.
  for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
    MachineOperand &MO = MI->getOperand(OperIdx);
    if (!MO.isFI())
      continue;

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();
    MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

    // Copy operands before the frame-index.
    for (unsigned i = 0; i < OperIdx; ++i)
      MIB.addOperand(MI->getOperand(i));
    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering.  This codepath is not
      // used for patchpoints/stackmaps at all; for these, spilling is done via
      // the foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.addOperand(MI->getOperand(OperIdx));
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints.
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.addOperand(MI->getOperand(OperIdx));
      MIB.addImm(0);
    }
    // Copy the operands after the frame index.
    for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
      MIB.addOperand(MI->getOperand(i));

    // Inherit previous memory operands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    unsigned Flags = MachineMemOperand::MOLoad;
    if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
      Flags |= MachineMemOperand::MOStore;
      Flags |= MachineMemOperand::MOVolatile;
    }
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags,
        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
    MIB->addMemOperand(MF, MMO);

    // Replace the instruction and update the operand index.
    MBB->insert(MachineBasicBlock::iterator(MI), MIB);
    OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
    MI->eraseFromParent();
    MI = MIB;
  }
  return MBB;
}
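
// A frame-index operand thus expands into one of two stackmap operand
// sequences (illustrative):
//
//   Direct:   DirectMemRefOp,   <FI>, 0          (patchpoints, allocas)
//   Indirect: IndirectMemRefOp, <size>, <FI>, 0  (statepoint spill slots)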
1172
1173/// findRepresentativeClass - Return the largest legal super-reg register class
1174/// of the register class for the specified type and its associated "cost".
1175// This function is in TargetLowering because it uses RegClassForVT which would
1176// need to be moved to TargetRegisterInfo and would necessitate moving
1177// isTypeLegal over as well - a massive change that would just require
1178// TargetLowering having a TargetRegisterInfo class member that it would use.
1179std::pair<const TargetRegisterClass *, uint8_t>
1180TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
1181                                            MVT VT) const {
1182  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1183  if (!RC)
1184    return std::make_pair(RC, 0);
1185
1186  // Compute the set of all super-register classes.
1187  BitVector SuperRegRC(TRI->getNumRegClasses());
1188  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
1189    SuperRegRC.setBitsInMask(RCI.getMask());
1190
1191  // Find the first legal register class with the largest spill size.
1192  const TargetRegisterClass *BestRC = RC;
1193  for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
1194    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
1195    // We want the largest possible spill size.
1196    if (SuperRC->getSize() <= BestRC->getSize())
1197      continue;
1198    if (!isLegalRC(SuperRC))
1199      continue;
1200    BestRC = SuperRC;
1201  }
1202  return std::make_pair(BestRC, 1);
1203}
1204
1205/// computeRegisterProperties - Once all of the register classes are added,
1206/// this allows us to compute derived properties we expose.
1207void TargetLoweringBase::computeRegisterProperties(
1208    const TargetRegisterInfo *TRI) {
1209  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
1210                "Too many value types for ValueTypeActions to hold!");
1211
1212  // Everything defaults to needing one register.
1213  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
1214    NumRegistersForVT[i] = 1;
1215    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
1216  }
1217  // ...except isVoid, which doesn't need any registers.
1218  NumRegistersForVT[MVT::isVoid] = 0;
1219
1220  // Find the largest integer register class.
1221  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
1222  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
1223    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
1224
1225  // Every integer value type larger than this largest register takes twice as
1226  // many registers to represent as the previous ValueType.
1227  for (unsigned ExpandedReg = LargestIntReg + 1;
1228       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1229    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1230    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1231    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1232    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1233                                   TypeExpandInteger);
1234  }
1235
1236  // Inspect all of the ValueType's smaller than the largest integer
1237  // register to see which ones need promotion.
1238  unsigned LegalIntReg = LargestIntReg;
1239  for (unsigned IntReg = LargestIntReg - 1;
1240       IntReg >= (unsigned)MVT::i1; --IntReg) {
1241    MVT IVT = (MVT::SimpleValueType)IntReg;
1242    if (isTypeLegal(IVT)) {
1243      LegalIntReg = IntReg;
1244    } else {
1245      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1246        (const MVT::SimpleValueType)LegalIntReg;
1247      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1248    }
1249  }
1250
1251  // ppcf128 type is really two f64's.
1252  if (!isTypeLegal(MVT::ppcf128)) {
1253    NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1254    RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1255    TransformToType[MVT::ppcf128] = MVT::f64;
1256    ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1257  }
1258
1259  // Decide how to handle f128. If the target does not have native f128 support,
1260  // expand it to i128 and we will be generating soft float library calls.
1261  if (!isTypeLegal(MVT::f128)) {
1262    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1263    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1264    TransformToType[MVT::f128] = MVT::i128;
1265    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1266  }
1267
1268  // Decide how to handle f64. If the target does not have native f64 support,
1269  // expand it to i64 and we will be generating soft float library calls.
1270  if (!isTypeLegal(MVT::f64)) {
1271    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1272    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1273    TransformToType[MVT::f64] = MVT::i64;
1274    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1275  }
1276
1277  // Decide how to handle f32. If the target does not have native f32 support,
1278  // expand it to i32 and we will be generating soft float library calls.
1279  if (!isTypeLegal(MVT::f32)) {
1280    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1281    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1282    TransformToType[MVT::f32] = MVT::i32;
1283    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1284  }
1285
1286  // Decide how to handle f16. If the target does not have native f16 support,
1287  // promote it to f32, because there are no f16 library calls (except for
1288  // conversions).
1289  if (!isTypeLegal(MVT::f16)) {
1290    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
1291    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
1292    TransformToType[MVT::f16] = MVT::f32;
1293    ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
1294  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    unsigned NElts = VT.getVectorNumElements();
    bool IsLegalWiderType = false;
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits()
            && SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)
            && SVT.getScalarType().isInteger()) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH; // No legal promotion found; try the widen-vector method.
    }
    case TypeWidenVector: {
      // Try to widen the vector.
      for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        if (SVT.getVectorElementType() == EltVT
            && SVT.getVectorNumElements() > NElts && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH; // No legal wider type found; split or scalarize below.
    }
    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      NumRegistersForVT[i] = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2.  The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          // Set type action according to the number of elements.
          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }
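
  // To illustrate the loop above, consider a hypothetical target whose only
  // legal vector type is v4i32: v4i16 is promoted to v4i32 (same element
  // count, wider elements), v2i32 falls through to widening and becomes
  // v4i32 (same element type, more elements), and v8i32, having no wider
  // legal type, is split in half.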

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class) legal register class for a group of value
  // types. For example, on i386 the representative class for i8, i16, and
  // i32 would be GR32; on x86_64 it is GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register.  It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
///
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                EVT &IntermediateVT,
                                                unsigned &NumIntermediates,
                                                MVT &RegisterVT) const {
  unsigned NumElts = VT.getVectorNumElements();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (NumElts != 1 && (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // FIXME: We don't support non-power-of-2-sized vectors for now.  Ideally we
  // could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(NumElts)) {
    NumVectorRegs = NumElts;
    NumElts = 1;
  }
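
  // For example (illustrative), v3f32 takes this path: NumVectorRegs becomes
  // 3 and NumElts becomes 1, so (assuming v1f32 is not legal) the value is
  // handed off as three scalar f32 intermediates.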

  // Divide the input until we get to a supported size.  This will always
  // end with a scalar if the target doesn't support vectors.
  while (NumElts > 1 && !isTypeLegal(
                                   EVT::getVectorVT(Context, EltTy, NumElts))) {
    NumElts >>= 1;
    NumVectorRegs <<= 1;
  }
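
  // E.g. for v8f32 on a target where v4f32 is the widest legal vector type,
  // one halving gives NumElts = 4 and NumVectorRegs = 2, matching the
  // "v8f32 -> 2 x v4f32" breakdown described above.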

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;
  unsigned NewVTSize = NewVT.getSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(NewVTSize))
    NewVTSize = NextPowerOf2(NewVTSize);

  if (EVT(DestVT).bitsLT(NewVT))   // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function.  This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLoweringBase.cpp.
void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }
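
    // E.g. an i8 return value marked 'signext' has just been widened to the
    // i32 register type, so it will be returned as a sign-extended i32.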

    unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
    MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);

    // 'inreg' on a function refers to its return value.
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate the extension type, if any.
    if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isFixed=*/true, 0, 0));
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.  This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlignment(Ty);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace,
                                            unsigned Alignment,
                                            bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
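  //
  // E.g. for an i32 access whose ABI alignment is 4 bytes, Alignment >= 4 is
  // reported as fast below, while anything smaller is deferred to the
  // target's allowsMisalignedMemoryAccesses hook.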
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlignment(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

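// Map an IR instruction opcode to the roughly equivalent ISD opcode, or
// return 0 when there is no one-to-one SelectionDAG counterpart (control
// flow, calls, memory bookkeeping, etc.). Note that PtrToInt, IntToPtr and
// BitCast all map to ISD::BITCAST for costing purposes.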
int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
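  //
  // E.g. if v4i32 is the widest legal vector type, costing v16i32 performs
  // two splits (v16i32 -> v8i32 -> v4i32) and returns {4, v4i32}.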
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return nullptr;

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                     StackPtrTy->getPointerTo(0), nullptr);
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS) const {
  // The default implementation supports a conservative RISC-style "r+r" and
  // "r+i" addressing mode.
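  //
  // Concretely, this accepts forms like "r1 + r2", "r1 + imm", and "2*r1"
  // (treated as r1 + r1), but rejects "r + r + imm", any global as a base,
  // and any scale other than 0, 1, or 2.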

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}