TargetLowering.cpp revision 199481
1//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements the TargetLowering class. 11// 12//===----------------------------------------------------------------------===// 13 14#include "llvm/Target/TargetLowering.h" 15#include "llvm/MC/MCAsmInfo.h" 16#include "llvm/Target/TargetData.h" 17#include "llvm/Target/TargetLoweringObjectFile.h" 18#include "llvm/Target/TargetMachine.h" 19#include "llvm/Target/TargetRegisterInfo.h" 20#include "llvm/Target/TargetSubtarget.h" 21#include "llvm/GlobalVariable.h" 22#include "llvm/DerivedTypes.h" 23#include "llvm/CodeGen/MachineFrameInfo.h" 24#include "llvm/CodeGen/SelectionDAG.h" 25#include "llvm/ADT/STLExtras.h" 26#include "llvm/Support/ErrorHandling.h" 27#include "llvm/Support/MathExtras.h" 28using namespace llvm; 29 30namespace llvm { 31TLSModel::Model getTLSModel(const GlobalValue *GV, Reloc::Model reloc) { 32 bool isLocal = GV->hasLocalLinkage(); 33 bool isDeclaration = GV->isDeclaration(); 34 // FIXME: what should we do for protected and internal visibility? 35 // For variables, is internal different from hidden? 36 bool isHidden = GV->hasHiddenVisibility(); 37 38 if (reloc == Reloc::PIC_) { 39 if (isLocal || isHidden) 40 return TLSModel::LocalDynamic; 41 else 42 return TLSModel::GeneralDynamic; 43 } else { 44 if (!isDeclaration || isHidden) 45 return TLSModel::LocalExec; 46 else 47 return TLSModel::InitialExec; 48 } 49} 50} 51 52/// InitLibcallNames - Set default libcall names. 53/// 54static void InitLibcallNames(const char **Names) { 55 Names[RTLIB::SHL_I16] = "__ashlhi3"; 56 Names[RTLIB::SHL_I32] = "__ashlsi3"; 57 Names[RTLIB::SHL_I64] = "__ashldi3"; 58 Names[RTLIB::SHL_I128] = "__ashlti3"; 59 Names[RTLIB::SRL_I16] = "__lshrhi3"; 60 Names[RTLIB::SRL_I32] = "__lshrsi3"; 61 Names[RTLIB::SRL_I64] = "__lshrdi3"; 62 Names[RTLIB::SRL_I128] = "__lshrti3"; 63 Names[RTLIB::SRA_I16] = "__ashrhi3"; 64 Names[RTLIB::SRA_I32] = "__ashrsi3"; 65 Names[RTLIB::SRA_I64] = "__ashrdi3"; 66 Names[RTLIB::SRA_I128] = "__ashrti3"; 67 Names[RTLIB::MUL_I8] = "__mulqi3"; 68 Names[RTLIB::MUL_I16] = "__mulhi3"; 69 Names[RTLIB::MUL_I32] = "__mulsi3"; 70 Names[RTLIB::MUL_I64] = "__muldi3"; 71 Names[RTLIB::MUL_I128] = "__multi3"; 72 Names[RTLIB::SDIV_I8] = "__divqi3"; 73 Names[RTLIB::SDIV_I16] = "__divhi3"; 74 Names[RTLIB::SDIV_I32] = "__divsi3"; 75 Names[RTLIB::SDIV_I64] = "__divdi3"; 76 Names[RTLIB::SDIV_I128] = "__divti3"; 77 Names[RTLIB::UDIV_I8] = "__udivqi3"; 78 Names[RTLIB::UDIV_I16] = "__udivhi3"; 79 Names[RTLIB::UDIV_I32] = "__udivsi3"; 80 Names[RTLIB::UDIV_I64] = "__udivdi3"; 81 Names[RTLIB::UDIV_I128] = "__udivti3"; 82 Names[RTLIB::SREM_I8] = "__modqi3"; 83 Names[RTLIB::SREM_I16] = "__modhi3"; 84 Names[RTLIB::SREM_I32] = "__modsi3"; 85 Names[RTLIB::SREM_I64] = "__moddi3"; 86 Names[RTLIB::SREM_I128] = "__modti3"; 87 Names[RTLIB::UREM_I8] = "__umodqi3"; 88 Names[RTLIB::UREM_I16] = "__umodhi3"; 89 Names[RTLIB::UREM_I32] = "__umodsi3"; 90 Names[RTLIB::UREM_I64] = "__umoddi3"; 91 Names[RTLIB::UREM_I128] = "__umodti3"; 92 Names[RTLIB::NEG_I32] = "__negsi2"; 93 Names[RTLIB::NEG_I64] = "__negdi2"; 94 Names[RTLIB::ADD_F32] = "__addsf3"; 95 Names[RTLIB::ADD_F64] = "__adddf3"; 96 Names[RTLIB::ADD_F80] = "__addxf3"; 97 Names[RTLIB::ADD_PPCF128] = "__gcc_qadd"; 98 Names[RTLIB::SUB_F32] = 
"__subsf3"; 99 Names[RTLIB::SUB_F64] = "__subdf3"; 100 Names[RTLIB::SUB_F80] = "__subxf3"; 101 Names[RTLIB::SUB_PPCF128] = "__gcc_qsub"; 102 Names[RTLIB::MUL_F32] = "__mulsf3"; 103 Names[RTLIB::MUL_F64] = "__muldf3"; 104 Names[RTLIB::MUL_F80] = "__mulxf3"; 105 Names[RTLIB::MUL_PPCF128] = "__gcc_qmul"; 106 Names[RTLIB::DIV_F32] = "__divsf3"; 107 Names[RTLIB::DIV_F64] = "__divdf3"; 108 Names[RTLIB::DIV_F80] = "__divxf3"; 109 Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv"; 110 Names[RTLIB::REM_F32] = "fmodf"; 111 Names[RTLIB::REM_F64] = "fmod"; 112 Names[RTLIB::REM_F80] = "fmodl"; 113 Names[RTLIB::REM_PPCF128] = "fmodl"; 114 Names[RTLIB::POWI_F32] = "__powisf2"; 115 Names[RTLIB::POWI_F64] = "__powidf2"; 116 Names[RTLIB::POWI_F80] = "__powixf2"; 117 Names[RTLIB::POWI_PPCF128] = "__powitf2"; 118 Names[RTLIB::SQRT_F32] = "sqrtf"; 119 Names[RTLIB::SQRT_F64] = "sqrt"; 120 Names[RTLIB::SQRT_F80] = "sqrtl"; 121 Names[RTLIB::SQRT_PPCF128] = "sqrtl"; 122 Names[RTLIB::LOG_F32] = "logf"; 123 Names[RTLIB::LOG_F64] = "log"; 124 Names[RTLIB::LOG_F80] = "logl"; 125 Names[RTLIB::LOG_PPCF128] = "logl"; 126 Names[RTLIB::LOG2_F32] = "log2f"; 127 Names[RTLIB::LOG2_F64] = "log2"; 128 Names[RTLIB::LOG2_F80] = "log2l"; 129 Names[RTLIB::LOG2_PPCF128] = "log2l"; 130 Names[RTLIB::LOG10_F32] = "log10f"; 131 Names[RTLIB::LOG10_F64] = "log10"; 132 Names[RTLIB::LOG10_F80] = "log10l"; 133 Names[RTLIB::LOG10_PPCF128] = "log10l"; 134 Names[RTLIB::EXP_F32] = "expf"; 135 Names[RTLIB::EXP_F64] = "exp"; 136 Names[RTLIB::EXP_F80] = "expl"; 137 Names[RTLIB::EXP_PPCF128] = "expl"; 138 Names[RTLIB::EXP2_F32] = "exp2f"; 139 Names[RTLIB::EXP2_F64] = "exp2"; 140 Names[RTLIB::EXP2_F80] = "exp2l"; 141 Names[RTLIB::EXP2_PPCF128] = "exp2l"; 142 Names[RTLIB::SIN_F32] = "sinf"; 143 Names[RTLIB::SIN_F64] = "sin"; 144 Names[RTLIB::SIN_F80] = "sinl"; 145 Names[RTLIB::SIN_PPCF128] = "sinl"; 146 Names[RTLIB::COS_F32] = "cosf"; 147 Names[RTLIB::COS_F64] = "cos"; 148 Names[RTLIB::COS_F80] = "cosl"; 149 Names[RTLIB::COS_PPCF128] = "cosl"; 150 Names[RTLIB::POW_F32] = "powf"; 151 Names[RTLIB::POW_F64] = "pow"; 152 Names[RTLIB::POW_F80] = "powl"; 153 Names[RTLIB::POW_PPCF128] = "powl"; 154 Names[RTLIB::CEIL_F32] = "ceilf"; 155 Names[RTLIB::CEIL_F64] = "ceil"; 156 Names[RTLIB::CEIL_F80] = "ceill"; 157 Names[RTLIB::CEIL_PPCF128] = "ceill"; 158 Names[RTLIB::TRUNC_F32] = "truncf"; 159 Names[RTLIB::TRUNC_F64] = "trunc"; 160 Names[RTLIB::TRUNC_F80] = "truncl"; 161 Names[RTLIB::TRUNC_PPCF128] = "truncl"; 162 Names[RTLIB::RINT_F32] = "rintf"; 163 Names[RTLIB::RINT_F64] = "rint"; 164 Names[RTLIB::RINT_F80] = "rintl"; 165 Names[RTLIB::RINT_PPCF128] = "rintl"; 166 Names[RTLIB::NEARBYINT_F32] = "nearbyintf"; 167 Names[RTLIB::NEARBYINT_F64] = "nearbyint"; 168 Names[RTLIB::NEARBYINT_F80] = "nearbyintl"; 169 Names[RTLIB::NEARBYINT_PPCF128] = "nearbyintl"; 170 Names[RTLIB::FLOOR_F32] = "floorf"; 171 Names[RTLIB::FLOOR_F64] = "floor"; 172 Names[RTLIB::FLOOR_F80] = "floorl"; 173 Names[RTLIB::FLOOR_PPCF128] = "floorl"; 174 Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2"; 175 Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2"; 176 Names[RTLIB::FPROUND_F80_F32] = "__truncxfsf2"; 177 Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2"; 178 Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2"; 179 Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2"; 180 Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfi8"; 181 Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfi16"; 182 Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi"; 183 Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi"; 184 Names[RTLIB::FPTOSINT_F32_I128] = 
"__fixsfti"; 185 Names[RTLIB::FPTOSINT_F64_I32] = "__fixdfsi"; 186 Names[RTLIB::FPTOSINT_F64_I64] = "__fixdfdi"; 187 Names[RTLIB::FPTOSINT_F64_I128] = "__fixdfti"; 188 Names[RTLIB::FPTOSINT_F80_I32] = "__fixxfsi"; 189 Names[RTLIB::FPTOSINT_F80_I64] = "__fixxfdi"; 190 Names[RTLIB::FPTOSINT_F80_I128] = "__fixxfti"; 191 Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi"; 192 Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi"; 193 Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti"; 194 Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfi8"; 195 Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfi16"; 196 Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi"; 197 Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi"; 198 Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti"; 199 Names[RTLIB::FPTOUINT_F64_I32] = "__fixunsdfsi"; 200 Names[RTLIB::FPTOUINT_F64_I64] = "__fixunsdfdi"; 201 Names[RTLIB::FPTOUINT_F64_I128] = "__fixunsdfti"; 202 Names[RTLIB::FPTOUINT_F80_I32] = "__fixunsxfsi"; 203 Names[RTLIB::FPTOUINT_F80_I64] = "__fixunsxfdi"; 204 Names[RTLIB::FPTOUINT_F80_I128] = "__fixunsxfti"; 205 Names[RTLIB::FPTOUINT_PPCF128_I32] = "__fixunstfsi"; 206 Names[RTLIB::FPTOUINT_PPCF128_I64] = "__fixunstfdi"; 207 Names[RTLIB::FPTOUINT_PPCF128_I128] = "__fixunstfti"; 208 Names[RTLIB::SINTTOFP_I32_F32] = "__floatsisf"; 209 Names[RTLIB::SINTTOFP_I32_F64] = "__floatsidf"; 210 Names[RTLIB::SINTTOFP_I32_F80] = "__floatsixf"; 211 Names[RTLIB::SINTTOFP_I32_PPCF128] = "__floatsitf"; 212 Names[RTLIB::SINTTOFP_I64_F32] = "__floatdisf"; 213 Names[RTLIB::SINTTOFP_I64_F64] = "__floatdidf"; 214 Names[RTLIB::SINTTOFP_I64_F80] = "__floatdixf"; 215 Names[RTLIB::SINTTOFP_I64_PPCF128] = "__floatditf"; 216 Names[RTLIB::SINTTOFP_I128_F32] = "__floattisf"; 217 Names[RTLIB::SINTTOFP_I128_F64] = "__floattidf"; 218 Names[RTLIB::SINTTOFP_I128_F80] = "__floattixf"; 219 Names[RTLIB::SINTTOFP_I128_PPCF128] = "__floattitf"; 220 Names[RTLIB::UINTTOFP_I32_F32] = "__floatunsisf"; 221 Names[RTLIB::UINTTOFP_I32_F64] = "__floatunsidf"; 222 Names[RTLIB::UINTTOFP_I32_F80] = "__floatunsixf"; 223 Names[RTLIB::UINTTOFP_I32_PPCF128] = "__floatunsitf"; 224 Names[RTLIB::UINTTOFP_I64_F32] = "__floatundisf"; 225 Names[RTLIB::UINTTOFP_I64_F64] = "__floatundidf"; 226 Names[RTLIB::UINTTOFP_I64_F80] = "__floatundixf"; 227 Names[RTLIB::UINTTOFP_I64_PPCF128] = "__floatunditf"; 228 Names[RTLIB::UINTTOFP_I128_F32] = "__floatuntisf"; 229 Names[RTLIB::UINTTOFP_I128_F64] = "__floatuntidf"; 230 Names[RTLIB::UINTTOFP_I128_F80] = "__floatuntixf"; 231 Names[RTLIB::UINTTOFP_I128_PPCF128] = "__floatuntitf"; 232 Names[RTLIB::OEQ_F32] = "__eqsf2"; 233 Names[RTLIB::OEQ_F64] = "__eqdf2"; 234 Names[RTLIB::UNE_F32] = "__nesf2"; 235 Names[RTLIB::UNE_F64] = "__nedf2"; 236 Names[RTLIB::OGE_F32] = "__gesf2"; 237 Names[RTLIB::OGE_F64] = "__gedf2"; 238 Names[RTLIB::OLT_F32] = "__ltsf2"; 239 Names[RTLIB::OLT_F64] = "__ltdf2"; 240 Names[RTLIB::OLE_F32] = "__lesf2"; 241 Names[RTLIB::OLE_F64] = "__ledf2"; 242 Names[RTLIB::OGT_F32] = "__gtsf2"; 243 Names[RTLIB::OGT_F64] = "__gtdf2"; 244 Names[RTLIB::UO_F32] = "__unordsf2"; 245 Names[RTLIB::UO_F64] = "__unorddf2"; 246 Names[RTLIB::O_F32] = "__unordsf2"; 247 Names[RTLIB::O_F64] = "__unorddf2"; 248 Names[RTLIB::MEMCPY] = "memcpy"; 249 Names[RTLIB::MEMMOVE] = "memmove"; 250 Names[RTLIB::MEMSET] = "memset"; 251 Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume"; 252} 253 254/// InitLibcallCallingConvs - Set default libcall CallingConvs. 
255/// 256static void InitLibcallCallingConvs(CallingConv::ID *CCs) { 257 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i) { 258 CCs[i] = CallingConv::C; 259 } 260} 261 262/// getFPEXT - Return the FPEXT_*_* value for the given types, or 263/// UNKNOWN_LIBCALL if there is none. 264RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) { 265 if (OpVT == MVT::f32) { 266 if (RetVT == MVT::f64) 267 return FPEXT_F32_F64; 268 } 269 return UNKNOWN_LIBCALL; 270} 271 272/// getFPROUND - Return the FPROUND_*_* value for the given types, or 273/// UNKNOWN_LIBCALL if there is none. 274RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) { 275 if (RetVT == MVT::f32) { 276 if (OpVT == MVT::f64) 277 return FPROUND_F64_F32; 278 if (OpVT == MVT::f80) 279 return FPROUND_F80_F32; 280 if (OpVT == MVT::ppcf128) 281 return FPROUND_PPCF128_F32; 282 } else if (RetVT == MVT::f64) { 283 if (OpVT == MVT::f80) 284 return FPROUND_F80_F64; 285 if (OpVT == MVT::ppcf128) 286 return FPROUND_PPCF128_F64; 287 } 288 return UNKNOWN_LIBCALL; 289} 290 291/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or 292/// UNKNOWN_LIBCALL if there is none. 293RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) { 294 if (OpVT == MVT::f32) { 295 if (RetVT == MVT::i8) 296 return FPTOSINT_F32_I8; 297 if (RetVT == MVT::i16) 298 return FPTOSINT_F32_I16; 299 if (RetVT == MVT::i32) 300 return FPTOSINT_F32_I32; 301 if (RetVT == MVT::i64) 302 return FPTOSINT_F32_I64; 303 if (RetVT == MVT::i128) 304 return FPTOSINT_F32_I128; 305 } else if (OpVT == MVT::f64) { 306 if (RetVT == MVT::i32) 307 return FPTOSINT_F64_I32; 308 if (RetVT == MVT::i64) 309 return FPTOSINT_F64_I64; 310 if (RetVT == MVT::i128) 311 return FPTOSINT_F64_I128; 312 } else if (OpVT == MVT::f80) { 313 if (RetVT == MVT::i32) 314 return FPTOSINT_F80_I32; 315 if (RetVT == MVT::i64) 316 return FPTOSINT_F80_I64; 317 if (RetVT == MVT::i128) 318 return FPTOSINT_F80_I128; 319 } else if (OpVT == MVT::ppcf128) { 320 if (RetVT == MVT::i32) 321 return FPTOSINT_PPCF128_I32; 322 if (RetVT == MVT::i64) 323 return FPTOSINT_PPCF128_I64; 324 if (RetVT == MVT::i128) 325 return FPTOSINT_PPCF128_I128; 326 } 327 return UNKNOWN_LIBCALL; 328} 329 330/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or 331/// UNKNOWN_LIBCALL if there is none. 332RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) { 333 if (OpVT == MVT::f32) { 334 if (RetVT == MVT::i8) 335 return FPTOUINT_F32_I8; 336 if (RetVT == MVT::i16) 337 return FPTOUINT_F32_I16; 338 if (RetVT == MVT::i32) 339 return FPTOUINT_F32_I32; 340 if (RetVT == MVT::i64) 341 return FPTOUINT_F32_I64; 342 if (RetVT == MVT::i128) 343 return FPTOUINT_F32_I128; 344 } else if (OpVT == MVT::f64) { 345 if (RetVT == MVT::i32) 346 return FPTOUINT_F64_I32; 347 if (RetVT == MVT::i64) 348 return FPTOUINT_F64_I64; 349 if (RetVT == MVT::i128) 350 return FPTOUINT_F64_I128; 351 } else if (OpVT == MVT::f80) { 352 if (RetVT == MVT::i32) 353 return FPTOUINT_F80_I32; 354 if (RetVT == MVT::i64) 355 return FPTOUINT_F80_I64; 356 if (RetVT == MVT::i128) 357 return FPTOUINT_F80_I128; 358 } else if (OpVT == MVT::ppcf128) { 359 if (RetVT == MVT::i32) 360 return FPTOUINT_PPCF128_I32; 361 if (RetVT == MVT::i64) 362 return FPTOUINT_PPCF128_I64; 363 if (RetVT == MVT::i128) 364 return FPTOUINT_PPCF128_I128; 365 } 366 return UNKNOWN_LIBCALL; 367} 368 369/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or 370/// UNKNOWN_LIBCALL if there is none. 
371RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) { 372 if (OpVT == MVT::i32) { 373 if (RetVT == MVT::f32) 374 return SINTTOFP_I32_F32; 375 else if (RetVT == MVT::f64) 376 return SINTTOFP_I32_F64; 377 else if (RetVT == MVT::f80) 378 return SINTTOFP_I32_F80; 379 else if (RetVT == MVT::ppcf128) 380 return SINTTOFP_I32_PPCF128; 381 } else if (OpVT == MVT::i64) { 382 if (RetVT == MVT::f32) 383 return SINTTOFP_I64_F32; 384 else if (RetVT == MVT::f64) 385 return SINTTOFP_I64_F64; 386 else if (RetVT == MVT::f80) 387 return SINTTOFP_I64_F80; 388 else if (RetVT == MVT::ppcf128) 389 return SINTTOFP_I64_PPCF128; 390 } else if (OpVT == MVT::i128) { 391 if (RetVT == MVT::f32) 392 return SINTTOFP_I128_F32; 393 else if (RetVT == MVT::f64) 394 return SINTTOFP_I128_F64; 395 else if (RetVT == MVT::f80) 396 return SINTTOFP_I128_F80; 397 else if (RetVT == MVT::ppcf128) 398 return SINTTOFP_I128_PPCF128; 399 } 400 return UNKNOWN_LIBCALL; 401} 402 403/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or 404/// UNKNOWN_LIBCALL if there is none. 405RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) { 406 if (OpVT == MVT::i32) { 407 if (RetVT == MVT::f32) 408 return UINTTOFP_I32_F32; 409 else if (RetVT == MVT::f64) 410 return UINTTOFP_I32_F64; 411 else if (RetVT == MVT::f80) 412 return UINTTOFP_I32_F80; 413 else if (RetVT == MVT::ppcf128) 414 return UINTTOFP_I32_PPCF128; 415 } else if (OpVT == MVT::i64) { 416 if (RetVT == MVT::f32) 417 return UINTTOFP_I64_F32; 418 else if (RetVT == MVT::f64) 419 return UINTTOFP_I64_F64; 420 else if (RetVT == MVT::f80) 421 return UINTTOFP_I64_F80; 422 else if (RetVT == MVT::ppcf128) 423 return UINTTOFP_I64_PPCF128; 424 } else if (OpVT == MVT::i128) { 425 if (RetVT == MVT::f32) 426 return UINTTOFP_I128_F32; 427 else if (RetVT == MVT::f64) 428 return UINTTOFP_I128_F64; 429 else if (RetVT == MVT::f80) 430 return UINTTOFP_I128_F80; 431 else if (RetVT == MVT::ppcf128) 432 return UINTTOFP_I128_PPCF128; 433 } 434 return UNKNOWN_LIBCALL; 435} 436 437/// InitCmpLibcallCCs - Set default comparison libcall CC. 438/// 439static void InitCmpLibcallCCs(ISD::CondCode *CCs) { 440 memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL); 441 CCs[RTLIB::OEQ_F32] = ISD::SETEQ; 442 CCs[RTLIB::OEQ_F64] = ISD::SETEQ; 443 CCs[RTLIB::UNE_F32] = ISD::SETNE; 444 CCs[RTLIB::UNE_F64] = ISD::SETNE; 445 CCs[RTLIB::OGE_F32] = ISD::SETGE; 446 CCs[RTLIB::OGE_F64] = ISD::SETGE; 447 CCs[RTLIB::OLT_F32] = ISD::SETLT; 448 CCs[RTLIB::OLT_F64] = ISD::SETLT; 449 CCs[RTLIB::OLE_F32] = ISD::SETLE; 450 CCs[RTLIB::OLE_F64] = ISD::SETLE; 451 CCs[RTLIB::OGT_F32] = ISD::SETGT; 452 CCs[RTLIB::OGT_F64] = ISD::SETGT; 453 CCs[RTLIB::UO_F32] = ISD::SETNE; 454 CCs[RTLIB::UO_F64] = ISD::SETNE; 455 CCs[RTLIB::O_F32] = ISD::SETEQ; 456 CCs[RTLIB::O_F64] = ISD::SETEQ; 457} 458 459/// NOTE: The constructor takes ownership of TLOF. 460TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof) 461 : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) { 462 // All operations default to being supported. 463 memset(OpActions, 0, sizeof(OpActions)); 464 memset(LoadExtActions, 0, sizeof(LoadExtActions)); 465 memset(TruncStoreActions, 0, sizeof(TruncStoreActions)); 466 memset(IndexedModeActions, 0, sizeof(IndexedModeActions)); 467 memset(ConvertActions, 0, sizeof(ConvertActions)); 468 memset(CondCodeActions, 0, sizeof(CondCodeActions)); 469 470 // Set default actions for various operations. 
471 for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) { 472 // Default all indexed load / store to expand. 473 for (unsigned IM = (unsigned)ISD::PRE_INC; 474 IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) { 475 setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand); 476 setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand); 477 } 478 479 // These operations default to expand. 480 setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand); 481 setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand); 482 } 483 484 // Most targets ignore the @llvm.prefetch intrinsic. 485 setOperationAction(ISD::PREFETCH, MVT::Other, Expand); 486 487 // ConstantFP nodes default to expand. Targets can either change this to 488 // Legal, in which case all fp constants are legal, or use isFPImmLegal() 489 // to optimize expansions for certain constants. 490 setOperationAction(ISD::ConstantFP, MVT::f32, Expand); 491 setOperationAction(ISD::ConstantFP, MVT::f64, Expand); 492 setOperationAction(ISD::ConstantFP, MVT::f80, Expand); 493 494 // These library functions default to expand. 495 setOperationAction(ISD::FLOG , MVT::f64, Expand); 496 setOperationAction(ISD::FLOG2, MVT::f64, Expand); 497 setOperationAction(ISD::FLOG10,MVT::f64, Expand); 498 setOperationAction(ISD::FEXP , MVT::f64, Expand); 499 setOperationAction(ISD::FEXP2, MVT::f64, Expand); 500 setOperationAction(ISD::FLOG , MVT::f32, Expand); 501 setOperationAction(ISD::FLOG2, MVT::f32, Expand); 502 setOperationAction(ISD::FLOG10,MVT::f32, Expand); 503 setOperationAction(ISD::FEXP , MVT::f32, Expand); 504 setOperationAction(ISD::FEXP2, MVT::f32, Expand); 505 506 // Default ISD::TRAP to expand (which turns it into abort). 507 setOperationAction(ISD::TRAP, MVT::Other, Expand); 508 509 IsLittleEndian = TD->isLittleEndian(); 510 UsesGlobalOffsetTable = false; 511 ShiftAmountTy = PointerTy = MVT::getIntegerVT(8*TD->getPointerSize()); 512 memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); 513 memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); 514 maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; 515 benefitFromCodePlacementOpt = false; 516 UseUnderscoreSetJmp = false; 517 UseUnderscoreLongJmp = false; 518 SelectIsExpensive = false; 519 IntDivIsCheap = false; 520 Pow2DivIsCheap = false; 521 StackPointerRegisterToSaveRestore = 0; 522 ExceptionPointerRegister = 0; 523 ExceptionSelectorRegister = 0; 524 BooleanContents = UndefinedBooleanContent; 525 SchedPreferenceInfo = SchedulingForLatency; 526 JumpBufSize = 0; 527 JumpBufAlignment = 0; 528 IfCvtBlockSizeLimit = 2; 529 IfCvtDupBlockSizeLimit = 0; 530 PrefLoopAlignment = 0; 531 532 InitLibcallNames(LibcallRoutineNames); 533 InitCmpLibcallCCs(CmpLibcallCCs); 534 InitLibcallCallingConvs(LibcallCallingConvs); 535 536 // Tell Legalize whether the assembler supports DEBUG_LOC. 537 const MCAsmInfo *TASM = TM.getMCAsmInfo(); 538 if (!TASM || !TASM->hasDotLocAndDotFile()) 539 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand); 540} 541 542TargetLowering::~TargetLowering() { 543 delete &TLOF; 544} 545 546static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT, 547 unsigned &NumIntermediates, 548 EVT &RegisterVT, 549 TargetLowering* TLI) { 550 // Figure out the right, legal destination reg to copy into. 
551 unsigned NumElts = VT.getVectorNumElements(); 552 MVT EltTy = VT.getVectorElementType(); 553 554 unsigned NumVectorRegs = 1; 555 556 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we 557 // could break down into LHS/RHS like LegalizeDAG does. 558 if (!isPowerOf2_32(NumElts)) { 559 NumVectorRegs = NumElts; 560 NumElts = 1; 561 } 562 563 // Divide the input until we get to a supported size. This will always 564 // end with a scalar if the target doesn't support vectors. 565 while (NumElts > 1 && !TLI->isTypeLegal(MVT::getVectorVT(EltTy, NumElts))) { 566 NumElts >>= 1; 567 NumVectorRegs <<= 1; 568 } 569 570 NumIntermediates = NumVectorRegs; 571 572 MVT NewVT = MVT::getVectorVT(EltTy, NumElts); 573 if (!TLI->isTypeLegal(NewVT)) 574 NewVT = EltTy; 575 IntermediateVT = NewVT; 576 577 EVT DestVT = TLI->getRegisterType(NewVT); 578 RegisterVT = DestVT; 579 if (EVT(DestVT).bitsLT(NewVT)) { 580 // Value is expanded, e.g. i64 -> i16. 581 return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits()); 582 } else { 583 // Otherwise, promotion or legal types use the same number of registers as 584 // the vector decimated to the appropriate level. 585 return NumVectorRegs; 586 } 587 588 return 1; 589} 590 591/// computeRegisterProperties - Once all of the register classes are added, 592/// this allows us to compute derived properties we expose. 593void TargetLowering::computeRegisterProperties() { 594 assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE && 595 "Too many value types for ValueTypeActions to hold!"); 596 597 // Everything defaults to needing one register. 598 for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) { 599 NumRegistersForVT[i] = 1; 600 RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i; 601 } 602 // ...except isVoid, which doesn't need any registers. 603 NumRegistersForVT[MVT::isVoid] = 0; 604 605 // Find the largest integer register class. 606 unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE; 607 for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg) 608 assert(LargestIntReg != MVT::i1 && "No integer registers defined!"); 609 610 // Every integer value type larger than this largest register takes twice as 611 // many registers to represent as the previous ValueType. 612 for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) { 613 EVT ExpandedVT = (MVT::SimpleValueType)ExpandedReg; 614 if (!ExpandedVT.isInteger()) 615 break; 616 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1]; 617 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg; 618 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1); 619 ValueTypeActions.setTypeAction(ExpandedVT, Expand); 620 } 621 622 // Inspect all of the ValueType's smaller than the largest integer 623 // register to see which ones need promotion. 624 unsigned LegalIntReg = LargestIntReg; 625 for (unsigned IntReg = LargestIntReg - 1; 626 IntReg >= (unsigned)MVT::i1; --IntReg) { 627 EVT IVT = (MVT::SimpleValueType)IntReg; 628 if (isTypeLegal(IVT)) { 629 LegalIntReg = IntReg; 630 } else { 631 RegisterTypeForVT[IntReg] = TransformToType[IntReg] = 632 (MVT::SimpleValueType)LegalIntReg; 633 ValueTypeActions.setTypeAction(IVT, Promote); 634 } 635 } 636 637 // ppcf128 type is really two f64's. 
638 if (!isTypeLegal(MVT::ppcf128)) { 639 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64]; 640 RegisterTypeForVT[MVT::ppcf128] = MVT::f64; 641 TransformToType[MVT::ppcf128] = MVT::f64; 642 ValueTypeActions.setTypeAction(MVT::ppcf128, Expand); 643 } 644 645 // Decide how to handle f64. If the target does not have native f64 support, 646 // expand it to i64 and we will be generating soft float library calls. 647 if (!isTypeLegal(MVT::f64)) { 648 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64]; 649 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64]; 650 TransformToType[MVT::f64] = MVT::i64; 651 ValueTypeActions.setTypeAction(MVT::f64, Expand); 652 } 653 654 // Decide how to handle f32. If the target does not have native support for 655 // f32, promote it to f64 if it is legal. Otherwise, expand it to i32. 656 if (!isTypeLegal(MVT::f32)) { 657 if (isTypeLegal(MVT::f64)) { 658 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64]; 659 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64]; 660 TransformToType[MVT::f32] = MVT::f64; 661 ValueTypeActions.setTypeAction(MVT::f32, Promote); 662 } else { 663 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32]; 664 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32]; 665 TransformToType[MVT::f32] = MVT::i32; 666 ValueTypeActions.setTypeAction(MVT::f32, Expand); 667 } 668 } 669 670 // Loop over all of the vector value types to see which need transformations. 671 for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE; 672 i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) { 673 MVT VT = (MVT::SimpleValueType)i; 674 if (!isTypeLegal(VT)) { 675 MVT IntermediateVT; 676 EVT RegisterVT; 677 unsigned NumIntermediates; 678 NumRegistersForVT[i] = 679 getVectorTypeBreakdownMVT(VT, IntermediateVT, NumIntermediates, 680 RegisterVT, this); 681 RegisterTypeForVT[i] = RegisterVT; 682 683 // Determine if there is a legal wider type. 684 bool IsLegalWiderType = false; 685 EVT EltVT = VT.getVectorElementType(); 686 unsigned NElts = VT.getVectorNumElements(); 687 for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) { 688 EVT SVT = (MVT::SimpleValueType)nVT; 689 if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT && 690 SVT.getVectorNumElements() > NElts) { 691 TransformToType[i] = SVT; 692 ValueTypeActions.setTypeAction(VT, Promote); 693 IsLegalWiderType = true; 694 break; 695 } 696 } 697 if (!IsLegalWiderType) { 698 EVT NVT = VT.getPow2VectorType(); 699 if (NVT == VT) { 700 // Type is already a power of 2. The default action is to split. 701 TransformToType[i] = MVT::Other; 702 ValueTypeActions.setTypeAction(VT, Expand); 703 } else { 704 TransformToType[i] = NVT; 705 ValueTypeActions.setTypeAction(VT, Promote); 706 } 707 } 708 } 709 } 710} 711 712const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { 713 return NULL; 714} 715 716 717MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const { 718 return PointerTy.SimpleTy; 719} 720 721/// getVectorTypeBreakdown - Vector types are broken down into some number of 722/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32 723/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack. 724/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86. 725/// 726/// This method returns the number of registers needed, and the VT for each 727/// register. It also returns the VT and quantity of the intermediate values 728/// before they are promoted/expanded. 
729/// 730unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT, 731 EVT &IntermediateVT, 732 unsigned &NumIntermediates, 733 EVT &RegisterVT) const { 734 // Figure out the right, legal destination reg to copy into. 735 unsigned NumElts = VT.getVectorNumElements(); 736 EVT EltTy = VT.getVectorElementType(); 737 738 unsigned NumVectorRegs = 1; 739 740 // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we 741 // could break down into LHS/RHS like LegalizeDAG does. 742 if (!isPowerOf2_32(NumElts)) { 743 NumVectorRegs = NumElts; 744 NumElts = 1; 745 } 746 747 // Divide the input until we get to a supported size. This will always 748 // end with a scalar if the target doesn't support vectors. 749 while (NumElts > 1 && !isTypeLegal( 750 EVT::getVectorVT(Context, EltTy, NumElts))) { 751 NumElts >>= 1; 752 NumVectorRegs <<= 1; 753 } 754 755 NumIntermediates = NumVectorRegs; 756 757 EVT NewVT = EVT::getVectorVT(Context, EltTy, NumElts); 758 if (!isTypeLegal(NewVT)) 759 NewVT = EltTy; 760 IntermediateVT = NewVT; 761 762 EVT DestVT = getRegisterType(Context, NewVT); 763 RegisterVT = DestVT; 764 if (DestVT.bitsLT(NewVT)) { 765 // Value is expanded, e.g. i64 -> i16. 766 return NumVectorRegs*(NewVT.getSizeInBits()/DestVT.getSizeInBits()); 767 } else { 768 // Otherwise, promotion or legal types use the same number of registers as 769 // the vector decimated to the appropriate level. 770 return NumVectorRegs; 771 } 772 773 return 1; 774} 775 776/// getWidenVectorType: given a vector type, returns the type to widen to 777/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself. 778/// If there is no vector type that we want to widen to, returns MVT::Other 779/// When and where to widen is target dependent based on the cost of 780/// scalarizing vs using the wider vector type. 781EVT TargetLowering::getWidenVectorType(EVT VT) const { 782 assert(VT.isVector()); 783 if (isTypeLegal(VT)) 784 return VT; 785 786 // Default is not to widen until moved to LegalizeTypes 787 return MVT::Other; 788} 789 790/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate 791/// function arguments in the caller parameter area. This is the actual 792/// alignment, not its logarithm. 793unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const { 794 return TD->getCallFrameTypeAlignment(Ty); 795} 796 797SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, 798 SelectionDAG &DAG) const { 799 if (usesGlobalOffsetTable()) 800 return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy()); 801 return Table; 802} 803 804bool 805TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { 806 // Assume that everything is safe in static mode. 807 if (getTargetMachine().getRelocationModel() == Reloc::Static) 808 return true; 809 810 // In dynamic-no-pic mode, assume that known defined values are safe. 811 if (getTargetMachine().getRelocationModel() == Reloc::DynamicNoPIC && 812 GA && 813 !GA->getGlobal()->isDeclaration() && 814 !GA->getGlobal()->isWeakForLinker()) 815 return true; 816 817 // Otherwise assume nothing is safe. 818 return false; 819} 820 821//===----------------------------------------------------------------------===// 822// Optimization Methods 823//===----------------------------------------------------------------------===// 824 825/// ShrinkDemandedConstant - Check to see if the specified operand of the 826/// specified instruction is a constant integer. 
If so, check to see if there 827/// are any bits set in the constant that are not demanded. If so, shrink the 828/// constant and return true. 829bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDValue Op, 830 const APInt &Demanded) { 831 DebugLoc dl = Op.getDebugLoc(); 832 833 // FIXME: ISD::SELECT, ISD::SELECT_CC 834 switch (Op.getOpcode()) { 835 default: break; 836 case ISD::XOR: 837 case ISD::AND: 838 case ISD::OR: { 839 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 840 if (!C) return false; 841 842 if (Op.getOpcode() == ISD::XOR && 843 (C->getAPIntValue() | (~Demanded)).isAllOnesValue()) 844 return false; 845 846 // if we can expand it to have all bits set, do it 847 if (C->getAPIntValue().intersects(~Demanded)) { 848 EVT VT = Op.getValueType(); 849 SDValue New = DAG.getNode(Op.getOpcode(), dl, VT, Op.getOperand(0), 850 DAG.getConstant(Demanded & 851 C->getAPIntValue(), 852 VT)); 853 return CombineTo(Op, New); 854 } 855 856 break; 857 } 858 } 859 860 return false; 861} 862 863/// ShrinkDemandedOp - Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the 864/// casts are free. This uses isZExtFree and ZERO_EXTEND for the widening 865/// cast, but it could be generalized for targets with other types of 866/// implicit widening casts. 867bool 868TargetLowering::TargetLoweringOpt::ShrinkDemandedOp(SDValue Op, 869 unsigned BitWidth, 870 const APInt &Demanded, 871 DebugLoc dl) { 872 assert(Op.getNumOperands() == 2 && 873 "ShrinkDemandedOp only supports binary operators!"); 874 assert(Op.getNode()->getNumValues() == 1 && 875 "ShrinkDemandedOp only supports nodes with one result!"); 876 877 // Don't do this if the node has another user, which may require the 878 // full value. 879 if (!Op.getNode()->hasOneUse()) 880 return false; 881 882 // Search for the smallest integer type with free casts to and from 883 // Op's type. For expedience, just check power-of-2 integer types. 884 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 885 unsigned SmallVTBits = BitWidth - Demanded.countLeadingZeros(); 886 if (!isPowerOf2_32(SmallVTBits)) 887 SmallVTBits = NextPowerOf2(SmallVTBits); 888 for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) { 889 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits); 890 if (TLI.isTruncateFree(Op.getValueType(), SmallVT) && 891 TLI.isZExtFree(SmallVT, Op.getValueType())) { 892 // We found a type with free casts. 893 SDValue X = DAG.getNode(Op.getOpcode(), dl, SmallVT, 894 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 895 Op.getNode()->getOperand(0)), 896 DAG.getNode(ISD::TRUNCATE, dl, SmallVT, 897 Op.getNode()->getOperand(1))); 898 SDValue Z = DAG.getNode(ISD::ZERO_EXTEND, dl, Op.getValueType(), X); 899 return CombineTo(Op, Z); 900 } 901 } 902 return false; 903} 904 905/// SimplifyDemandedBits - Look at Op. At this point, we know that only the 906/// DemandedMask bits of the result of Op are ever used downstream. If we can 907/// use this information to simplify Op, create a new simplified DAG node and 908/// return true, returning the original and new nodes in Old and New. Otherwise, 909/// analyze the expression and return a mask of KnownOne and KnownZero bits for 910/// the expression (used to simplify the caller). The KnownZero/One bits may 911/// only be accurate for those bits in the DemandedMask. 
912bool TargetLowering::SimplifyDemandedBits(SDValue Op, 913 const APInt &DemandedMask, 914 APInt &KnownZero, 915 APInt &KnownOne, 916 TargetLoweringOpt &TLO, 917 unsigned Depth) const { 918 unsigned BitWidth = DemandedMask.getBitWidth(); 919 assert(Op.getValueSizeInBits() == BitWidth && 920 "Mask size mismatches value type size!"); 921 APInt NewMask = DemandedMask; 922 DebugLoc dl = Op.getDebugLoc(); 923 924 // Don't know anything. 925 KnownZero = KnownOne = APInt(BitWidth, 0); 926 927 // Other users may use these bits. 928 if (!Op.getNode()->hasOneUse()) { 929 if (Depth != 0) { 930 // If not at the root, Just compute the KnownZero/KnownOne bits to 931 // simplify things downstream. 932 TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth); 933 return false; 934 } 935 // If this is the root being simplified, allow it to have multiple uses, 936 // just set the NewMask to all bits. 937 NewMask = APInt::getAllOnesValue(BitWidth); 938 } else if (DemandedMask == 0) { 939 // Not demanding any bits from Op. 940 if (Op.getOpcode() != ISD::UNDEF) 941 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(Op.getValueType())); 942 return false; 943 } else if (Depth == 6) { // Limit search depth. 944 return false; 945 } 946 947 APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut; 948 switch (Op.getOpcode()) { 949 case ISD::Constant: 950 // We know all of the bits for a constant! 951 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask; 952 KnownZero = ~KnownOne & NewMask; 953 return false; // Don't fall through, will infinitely loop. 954 case ISD::AND: 955 // If the RHS is a constant, check to see if the LHS would be zero without 956 // using the bits from the RHS. Below, we use knowledge about the RHS to 957 // simplify the LHS, here we're using information from the LHS to simplify 958 // the RHS. 959 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 960 APInt LHSZero, LHSOne; 961 TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask, 962 LHSZero, LHSOne, Depth+1); 963 // If the LHS already has zeros where RHSC does, this and is dead. 964 if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask)) 965 return TLO.CombineTo(Op, Op.getOperand(0)); 966 // If any of the set bits in the RHS are known zero on the LHS, shrink 967 // the constant. 968 if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask)) 969 return true; 970 } 971 972 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 973 KnownOne, TLO, Depth+1)) 974 return true; 975 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 976 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask, 977 KnownZero2, KnownOne2, TLO, Depth+1)) 978 return true; 979 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 980 981 // If all of the demanded bits are known one on one side, return the other. 982 // These bits cannot contribute to the result of the 'and'. 983 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 984 return TLO.CombineTo(Op, Op.getOperand(0)); 985 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 986 return TLO.CombineTo(Op, Op.getOperand(1)); 987 // If all of the demanded bits in the inputs are known zeros, return zero. 988 if ((NewMask & (KnownZero|KnownZero2)) == NewMask) 989 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType())); 990 // If the RHS is a constant, see if we can simplify it. 
991 if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask)) 992 return true; 993 // If the operation can be done in a smaller type, do so. 994 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 995 return true; 996 997 // Output known-1 bits are only known if set in both the LHS & RHS. 998 KnownOne &= KnownOne2; 999 // Output known-0 are known to be clear if zero in either the LHS | RHS. 1000 KnownZero |= KnownZero2; 1001 break; 1002 case ISD::OR: 1003 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 1004 KnownOne, TLO, Depth+1)) 1005 return true; 1006 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1007 if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask, 1008 KnownZero2, KnownOne2, TLO, Depth+1)) 1009 return true; 1010 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1011 1012 // If all of the demanded bits are known zero on one side, return the other. 1013 // These bits cannot contribute to the result of the 'or'. 1014 if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask)) 1015 return TLO.CombineTo(Op, Op.getOperand(0)); 1016 if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask)) 1017 return TLO.CombineTo(Op, Op.getOperand(1)); 1018 // If all of the potentially set bits on one side are known to be set on 1019 // the other side, just use the 'other' side. 1020 if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask)) 1021 return TLO.CombineTo(Op, Op.getOperand(0)); 1022 if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask)) 1023 return TLO.CombineTo(Op, Op.getOperand(1)); 1024 // If the RHS is a constant, see if we can simplify it. 1025 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1026 return true; 1027 // If the operation can be done in a smaller type, do so. 1028 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1029 return true; 1030 1031 // Output known-0 bits are only known if clear in both the LHS & RHS. 1032 KnownZero &= KnownZero2; 1033 // Output known-1 are known to be set if set in either the LHS | RHS. 1034 KnownOne |= KnownOne2; 1035 break; 1036 case ISD::XOR: 1037 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero, 1038 KnownOne, TLO, Depth+1)) 1039 return true; 1040 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1041 if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2, 1042 KnownOne2, TLO, Depth+1)) 1043 return true; 1044 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1045 1046 // If all of the demanded bits are known zero on one side, return the other. 1047 // These bits cannot contribute to the result of the 'xor'. 1048 if ((KnownZero & NewMask) == NewMask) 1049 return TLO.CombineTo(Op, Op.getOperand(0)); 1050 if ((KnownZero2 & NewMask) == NewMask) 1051 return TLO.CombineTo(Op, Op.getOperand(1)); 1052 // If the operation can be done in a smaller type, do so. 1053 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1054 return true; 1055 1056 // If all of the unknown bits are known to be zero on one side or the other 1057 // (but not both) turn this into an *inclusive* or. 1058 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0 1059 if ((NewMask & ~KnownZero & ~KnownZero2) == 0) 1060 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, Op.getValueType(), 1061 Op.getOperand(0), 1062 Op.getOperand(1))); 1063 1064 // Output known-0 bits are known if clear or set in both the LHS & RHS. 
1065 KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 1066 // Output known-1 are known to be set if set in only one of the LHS, RHS. 1067 KnownOneOut = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 1068 1069 // If all of the demanded bits on one side are known, and all of the set 1070 // bits on that side are also known to be set on the other side, turn this 1071 // into an AND, as we know the bits will be cleared. 1072 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2 1073 if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known 1074 if ((KnownOne & KnownOne2) == KnownOne) { 1075 EVT VT = Op.getValueType(); 1076 SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT); 1077 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, 1078 Op.getOperand(0), ANDC)); 1079 } 1080 } 1081 1082 // If the RHS is a constant, see if we can simplify it. 1083 // for XOR, we prefer to force bits to 1 if they will make a -1. 1084 // if we can't force bits, try to shrink constant 1085 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1086 APInt Expanded = C->getAPIntValue() | (~NewMask); 1087 // if we can expand it to have all bits set, do it 1088 if (Expanded.isAllOnesValue()) { 1089 if (Expanded != C->getAPIntValue()) { 1090 EVT VT = Op.getValueType(); 1091 SDValue New = TLO.DAG.getNode(Op.getOpcode(), dl,VT, Op.getOperand(0), 1092 TLO.DAG.getConstant(Expanded, VT)); 1093 return TLO.CombineTo(Op, New); 1094 } 1095 // if it already has all the bits set, nothing to change 1096 // but don't shrink either! 1097 } else if (TLO.ShrinkDemandedConstant(Op, NewMask)) { 1098 return true; 1099 } 1100 } 1101 1102 KnownZero = KnownZeroOut; 1103 KnownOne = KnownOneOut; 1104 break; 1105 case ISD::SELECT: 1106 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero, 1107 KnownOne, TLO, Depth+1)) 1108 return true; 1109 if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2, 1110 KnownOne2, TLO, Depth+1)) 1111 return true; 1112 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1113 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1114 1115 // If the operands are constants, see if we can simplify them. 1116 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1117 return true; 1118 1119 // Only known if known in both the LHS and RHS. 1120 KnownOne &= KnownOne2; 1121 KnownZero &= KnownZero2; 1122 break; 1123 case ISD::SELECT_CC: 1124 if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero, 1125 KnownOne, TLO, Depth+1)) 1126 return true; 1127 if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2, 1128 KnownOne2, TLO, Depth+1)) 1129 return true; 1130 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1131 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?"); 1132 1133 // If the operands are constants, see if we can simplify them. 1134 if (TLO.ShrinkDemandedConstant(Op, NewMask)) 1135 return true; 1136 1137 // Only known if known in both the LHS and RHS. 1138 KnownOne &= KnownOne2; 1139 KnownZero &= KnownZero2; 1140 break; 1141 case ISD::SHL: 1142 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1143 unsigned ShAmt = SA->getZExtValue(); 1144 SDValue InOp = Op.getOperand(0); 1145 1146 // If the shift count is an invalid immediate, don't do anything. 1147 if (ShAmt >= BitWidth) 1148 break; 1149 1150 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a 1151 // single shift. 
We can do this if the bottom bits (which are shifted 1152 // out) are never demanded. 1153 if (InOp.getOpcode() == ISD::SRL && 1154 isa<ConstantSDNode>(InOp.getOperand(1))) { 1155 if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) { 1156 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1157 unsigned Opc = ISD::SHL; 1158 int Diff = ShAmt-C1; 1159 if (Diff < 0) { 1160 Diff = -Diff; 1161 Opc = ISD::SRL; 1162 } 1163 1164 SDValue NewSA = 1165 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1166 EVT VT = Op.getValueType(); 1167 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1168 InOp.getOperand(0), NewSA)); 1169 } 1170 } 1171 1172 if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt), 1173 KnownZero, KnownOne, TLO, Depth+1)) 1174 return true; 1175 KnownZero <<= SA->getZExtValue(); 1176 KnownOne <<= SA->getZExtValue(); 1177 // low bits known zero. 1178 KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getZExtValue()); 1179 } 1180 break; 1181 case ISD::SRL: 1182 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1183 EVT VT = Op.getValueType(); 1184 unsigned ShAmt = SA->getZExtValue(); 1185 unsigned VTSize = VT.getSizeInBits(); 1186 SDValue InOp = Op.getOperand(0); 1187 1188 // If the shift count is an invalid immediate, don't do anything. 1189 if (ShAmt >= BitWidth) 1190 break; 1191 1192 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a 1193 // single shift. We can do this if the top bits (which are shifted out) 1194 // are never demanded. 1195 if (InOp.getOpcode() == ISD::SHL && 1196 isa<ConstantSDNode>(InOp.getOperand(1))) { 1197 if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) { 1198 unsigned C1= cast<ConstantSDNode>(InOp.getOperand(1))->getZExtValue(); 1199 unsigned Opc = ISD::SRL; 1200 int Diff = ShAmt-C1; 1201 if (Diff < 0) { 1202 Diff = -Diff; 1203 Opc = ISD::SHL; 1204 } 1205 1206 SDValue NewSA = 1207 TLO.DAG.getConstant(Diff, Op.getOperand(1).getValueType()); 1208 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, 1209 InOp.getOperand(0), NewSA)); 1210 } 1211 } 1212 1213 // Compute the new bits that are at the top now. 1214 if (SimplifyDemandedBits(InOp, (NewMask << ShAmt), 1215 KnownZero, KnownOne, TLO, Depth+1)) 1216 return true; 1217 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1218 KnownZero = KnownZero.lshr(ShAmt); 1219 KnownOne = KnownOne.lshr(ShAmt); 1220 1221 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1222 KnownZero |= HighBits; // High bits known zero. 1223 } 1224 break; 1225 case ISD::SRA: 1226 // If this is an arithmetic shift right and only the low-bit is set, we can 1227 // always convert this into a logical shr, even if the shift amount is 1228 // variable. The low bit of the shift cannot be an input sign bit unless 1229 // the shift amount is >= the size of the datatype, which is undefined. 1230 if (DemandedMask == 1) 1231 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, Op.getValueType(), 1232 Op.getOperand(0), Op.getOperand(1))); 1233 1234 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 1235 EVT VT = Op.getValueType(); 1236 unsigned ShAmt = SA->getZExtValue(); 1237 1238 // If the shift count is an invalid immediate, don't do anything. 1239 if (ShAmt >= BitWidth) 1240 break; 1241 1242 APInt InDemandedMask = (NewMask << ShAmt); 1243 1244 // If any of the demanded bits are produced by the sign extension, we also 1245 // demand the input sign bit. 
1246 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt); 1247 if (HighBits.intersects(NewMask)) 1248 InDemandedMask |= APInt::getSignBit(VT.getSizeInBits()); 1249 1250 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask, 1251 KnownZero, KnownOne, TLO, Depth+1)) 1252 return true; 1253 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1254 KnownZero = KnownZero.lshr(ShAmt); 1255 KnownOne = KnownOne.lshr(ShAmt); 1256 1257 // Handle the sign bit, adjusted to where it is now in the mask. 1258 APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt); 1259 1260 // If the input sign bit is known to be zero, or if none of the top bits 1261 // are demanded, turn this into an unsigned shift right. 1262 if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) { 1263 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, 1264 Op.getOperand(0), 1265 Op.getOperand(1))); 1266 } else if (KnownOne.intersects(SignBit)) { // New bits are known one. 1267 KnownOne |= HighBits; 1268 } 1269 } 1270 break; 1271 case ISD::SIGN_EXTEND_INREG: { 1272 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1273 1274 // Sign extension. Compute the demanded bits in the result that are not 1275 // present in the input. 1276 APInt NewBits = APInt::getHighBitsSet(BitWidth, 1277 BitWidth - EVT.getSizeInBits()) & 1278 NewMask; 1279 1280 // If none of the extended bits are demanded, eliminate the sextinreg. 1281 if (NewBits == 0) 1282 return TLO.CombineTo(Op, Op.getOperand(0)); 1283 1284 APInt InSignBit = APInt::getSignBit(EVT.getSizeInBits()); 1285 InSignBit.zext(BitWidth); 1286 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, 1287 EVT.getSizeInBits()) & 1288 NewMask; 1289 1290 // Since the sign extended bits are demanded, we know that the sign 1291 // bit is demanded. 1292 InputDemandedBits |= InSignBit; 1293 1294 if (SimplifyDemandedBits(Op.getOperand(0), InputDemandedBits, 1295 KnownZero, KnownOne, TLO, Depth+1)) 1296 return true; 1297 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1298 1299 // If the sign bit of the input is known set or clear, then we know the 1300 // top bits of the result. 1301 1302 // If the input sign bit is known zero, convert this into a zero extension. 1303 if (KnownZero.intersects(InSignBit)) 1304 return TLO.CombineTo(Op, 1305 TLO.DAG.getZeroExtendInReg(Op.getOperand(0),dl,EVT)); 1306 1307 if (KnownOne.intersects(InSignBit)) { // Input sign bit known set 1308 KnownOne |= NewBits; 1309 KnownZero &= ~NewBits; 1310 } else { // Input sign bit unknown 1311 KnownZero &= ~NewBits; 1312 KnownOne &= ~NewBits; 1313 } 1314 break; 1315 } 1316 case ISD::ZERO_EXTEND: { 1317 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1318 APInt InMask = NewMask; 1319 InMask.trunc(OperandBitWidth); 1320 1321 // If none of the top bits are demanded, convert this into an any_extend. 
1322 APInt NewBits = 1323 APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask; 1324 if (!NewBits.intersects(NewMask)) 1325 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1326 Op.getValueType(), 1327 Op.getOperand(0))); 1328 1329 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1330 KnownZero, KnownOne, TLO, Depth+1)) 1331 return true; 1332 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1333 KnownZero.zext(BitWidth); 1334 KnownOne.zext(BitWidth); 1335 KnownZero |= NewBits; 1336 break; 1337 } 1338 case ISD::SIGN_EXTEND: { 1339 EVT InVT = Op.getOperand(0).getValueType(); 1340 unsigned InBits = InVT.getSizeInBits(); 1341 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits); 1342 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits); 1343 APInt NewBits = ~InMask & NewMask; 1344 1345 // If none of the top bits are demanded, convert this into an any_extend. 1346 if (NewBits == 0) 1347 return TLO.CombineTo(Op,TLO.DAG.getNode(ISD::ANY_EXTEND, dl, 1348 Op.getValueType(), 1349 Op.getOperand(0))); 1350 1351 // Since some of the sign extended bits are demanded, we know that the sign 1352 // bit is demanded. 1353 APInt InDemandedBits = InMask & NewMask; 1354 InDemandedBits |= InSignBit; 1355 InDemandedBits.trunc(InBits); 1356 1357 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero, 1358 KnownOne, TLO, Depth+1)) 1359 return true; 1360 KnownZero.zext(BitWidth); 1361 KnownOne.zext(BitWidth); 1362 1363 // If the sign bit is known zero, convert this to a zero extend. 1364 if (KnownZero.intersects(InSignBit)) 1365 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, 1366 Op.getValueType(), 1367 Op.getOperand(0))); 1368 1369 // If the sign bit is known one, the top bits match. 1370 if (KnownOne.intersects(InSignBit)) { 1371 KnownOne |= NewBits; 1372 KnownZero &= ~NewBits; 1373 } else { // Otherwise, top bits aren't known. 1374 KnownOne &= ~NewBits; 1375 KnownZero &= ~NewBits; 1376 } 1377 break; 1378 } 1379 case ISD::ANY_EXTEND: { 1380 unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits(); 1381 APInt InMask = NewMask; 1382 InMask.trunc(OperandBitWidth); 1383 if (SimplifyDemandedBits(Op.getOperand(0), InMask, 1384 KnownZero, KnownOne, TLO, Depth+1)) 1385 return true; 1386 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1387 KnownZero.zext(BitWidth); 1388 KnownOne.zext(BitWidth); 1389 break; 1390 } 1391 case ISD::TRUNCATE: { 1392 // Simplify the input, using demanded bit information, and compute the known 1393 // zero/one bits live out. 1394 APInt TruncMask = NewMask; 1395 TruncMask.zext(Op.getOperand(0).getValueSizeInBits()); 1396 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask, 1397 KnownZero, KnownOne, TLO, Depth+1)) 1398 return true; 1399 KnownZero.trunc(BitWidth); 1400 KnownOne.trunc(BitWidth); 1401 1402 // If the input is only used by this truncate, see if we can shrink it based 1403 // on the known demanded bits. 1404 if (Op.getOperand(0).getNode()->hasOneUse()) { 1405 SDValue In = Op.getOperand(0); 1406 unsigned InBitWidth = In.getValueSizeInBits(); 1407 switch (In.getOpcode()) { 1408 default: break; 1409 case ISD::SRL: 1410 // Shrink SRL by a constant if none of the high bits shifted in are 1411 // demanded. 
1412 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){ 1413 APInt HighBits = APInt::getHighBitsSet(InBitWidth, 1414 InBitWidth - BitWidth); 1415 HighBits = HighBits.lshr(ShAmt->getZExtValue()); 1416 HighBits.trunc(BitWidth); 1417 1418 if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) { 1419 // None of the shifted in bits are needed. Add a truncate of the 1420 // shift input, then shift it. 1421 SDValue NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE, dl, 1422 Op.getValueType(), 1423 In.getOperand(0)); 1424 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, 1425 Op.getValueType(), 1426 NewTrunc, 1427 In.getOperand(1))); 1428 } 1429 } 1430 break; 1431 } 1432 } 1433 1434 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1435 break; 1436 } 1437 case ISD::AssertZext: { 1438 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1439 APInt InMask = APInt::getLowBitsSet(BitWidth, 1440 VT.getSizeInBits()); 1441 if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask, 1442 KnownZero, KnownOne, TLO, Depth+1)) 1443 return true; 1444 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1445 KnownZero |= ~InMask & NewMask; 1446 break; 1447 } 1448 case ISD::BIT_CONVERT: 1449#if 0 1450 // If this is an FP->Int bitcast and if the sign bit is the only thing that 1451 // is demanded, turn this into a FGETSIGN. 1452 if (NewMask == EVT::getIntegerVTSignBit(Op.getValueType()) && 1453 MVT::isFloatingPoint(Op.getOperand(0).getValueType()) && 1454 !MVT::isVector(Op.getOperand(0).getValueType())) { 1455 // Only do this xform if FGETSIGN is valid or if before legalize. 1456 if (!TLO.AfterLegalize || 1457 isOperationLegal(ISD::FGETSIGN, Op.getValueType())) { 1458 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 1459 // place. We expect the SHL to be eliminated by other optimizations. 1460 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(), 1461 Op.getOperand(0)); 1462 unsigned ShVal = Op.getValueType().getSizeInBits()-1; 1463 SDValue ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy()); 1464 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(), 1465 Sign, ShAmt)); 1466 } 1467 } 1468#endif 1469 break; 1470 case ISD::ADD: 1471 case ISD::MUL: 1472 case ISD::SUB: { 1473 // Add, Sub, and Mul don't demand any bits in positions beyond that 1474 // of the highest bit demanded of them. 1475 APInt LoMask = APInt::getLowBitsSet(BitWidth, 1476 BitWidth - NewMask.countLeadingZeros()); 1477 if (SimplifyDemandedBits(Op.getOperand(0), LoMask, KnownZero2, 1478 KnownOne2, TLO, Depth+1)) 1479 return true; 1480 if (SimplifyDemandedBits(Op.getOperand(1), LoMask, KnownZero2, 1481 KnownOne2, TLO, Depth+1)) 1482 return true; 1483 // See if the operation should be performed at a smaller bit width. 1484 if (TLO.ShrinkDemandedOp(Op, BitWidth, NewMask, dl)) 1485 return true; 1486 } 1487 // FALL THROUGH 1488 default: 1489 // Just use ComputeMaskedBits to compute output bits. 1490 TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth); 1491 break; 1492 } 1493 1494 // If we know the value of all of the demanded bits, return this as a 1495 // constant. 1496 if ((NewMask & (KnownZero|KnownOne)) == NewMask) 1497 return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType())); 1498 1499 return false; 1500} 1501 1502/// computeMaskedBitsForTargetNode - Determine which of the bits specified 1503/// in Mask are known to be either zero or one and return them in the 1504/// KnownZero/KnownOne bitsets. 
1505void TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op, 1506 const APInt &Mask, 1507 APInt &KnownZero, 1508 APInt &KnownOne, 1509 const SelectionDAG &DAG, 1510 unsigned Depth) const { 1511 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1512 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1513 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1514 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1515 "Should use MaskedValueIsZero if you don't know whether Op" 1516 " is a target node!"); 1517 KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0); 1518} 1519 1520/// ComputeNumSignBitsForTargetNode - This method can be implemented by 1521/// targets that want to expose additional information about sign bits to the 1522/// DAG Combiner. 1523unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 1524 unsigned Depth) const { 1525 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 1526 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 1527 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 1528 Op.getOpcode() == ISD::INTRINSIC_VOID) && 1529 "Should use ComputeNumSignBits if you don't know whether Op" 1530 " is a target node!"); 1531 return 1; 1532} 1533 1534/// ValueHasExactlyOneBitSet - Test if the given value is known to have exactly 1535/// one bit set. This differs from ComputeMaskedBits in that it doesn't need to 1536/// determine which bit is set. 1537/// 1538static bool ValueHasExactlyOneBitSet(SDValue Val, const SelectionDAG &DAG) { 1539 // A left-shift of a constant one will have exactly one bit set, because 1540 // shifting the bit off the end is undefined. 1541 if (Val.getOpcode() == ISD::SHL) 1542 if (ConstantSDNode *C = 1543 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1544 if (C->getAPIntValue() == 1) 1545 return true; 1546 1547 // Similarly, a right-shift of a constant sign-bit will have exactly 1548 // one bit set. 1549 if (Val.getOpcode() == ISD::SRL) 1550 if (ConstantSDNode *C = 1551 dyn_cast<ConstantSDNode>(Val.getNode()->getOperand(0))) 1552 if (C->getAPIntValue().isSignBit()) 1553 return true; 1554 1555 // More could be done here, though the above checks are enough 1556 // to handle some common cases. 1557 1558 // Fall back to ComputeMaskedBits to catch other known cases. 1559 EVT OpVT = Val.getValueType(); 1560 unsigned BitWidth = OpVT.getSizeInBits(); 1561 APInt Mask = APInt::getAllOnesValue(BitWidth); 1562 APInt KnownZero, KnownOne; 1563 DAG.ComputeMaskedBits(Val, Mask, KnownZero, KnownOne); 1564 return (KnownZero.countPopulation() == BitWidth - 1) && 1565 (KnownOne.countPopulation() == 1); 1566} 1567 1568/// SimplifySetCC - Try to simplify a setcc built with the specified operands 1569/// and cc. If it is unable to simplify it, return a null SDValue. 1570SDValue 1571TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 1572 ISD::CondCode Cond, bool foldBooleans, 1573 DAGCombinerInfo &DCI, DebugLoc dl) const { 1574 SelectionDAG &DAG = DCI.DAG; 1575 LLVMContext &Context = *DAG.getContext(); 1576 1577 // These setcc operations always fold. 1578 switch (Cond) { 1579 default: break; 1580 case ISD::SETFALSE: 1581 case ISD::SETFALSE2: return DAG.getConstant(0, VT); 1582 case ISD::SETTRUE: 1583 case ISD::SETTRUE2: return DAG.getConstant(1, VT); 1584 } 1585 1586 if (isa<ConstantSDNode>(N0.getNode())) { 1587 // Ensure that the constant occurs on the RHS, and fold constant 1588 // comparisons. 
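 // Note that swapping the operands also requires swapping the condition
 // code, e.g. (setlt C, X) becomes (setgt X, C); getSetCCSwappedOperands
 // handles that.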
1589 return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); 1590 } 1591 1592 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 1593 const APInt &C1 = N1C->getAPIntValue(); 1594 1595 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 1596 // equality comparison, then we're just comparing whether X itself is 1597 // zero. 1598 if (N0.getOpcode() == ISD::SRL && (C1 == 0 || C1 == 1) && 1599 N0.getOperand(0).getOpcode() == ISD::CTLZ && 1600 N0.getOperand(1).getOpcode() == ISD::Constant) { 1601 unsigned ShAmt = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); 1602 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1603 ShAmt == Log2_32(N0.getValueType().getSizeInBits())) { 1604 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 1605 // (srl (ctlz x), 5) == 0 -> X != 0 1606 // (srl (ctlz x), 5) != 1 -> X != 0 1607 Cond = ISD::SETNE; 1608 } else { 1609 // (srl (ctlz x), 5) != 0 -> X == 0 1610 // (srl (ctlz x), 5) == 1 -> X == 0 1611 Cond = ISD::SETEQ; 1612 } 1613 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 1614 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 1615 Zero, Cond); 1616 } 1617 } 1618 1619 // If the LHS is '(and load, const)', the RHS is 0, 1620 // the test is for equality or unsigned, and all 1 bits of the const are 1621 // in the same partial word, see if we can shorten the load. 1622 if (DCI.isBeforeLegalize() && 1623 N0.getOpcode() == ISD::AND && C1 == 0 && 1624 N0.getNode()->hasOneUse() && 1625 isa<LoadSDNode>(N0.getOperand(0)) && 1626 N0.getOperand(0).getNode()->hasOneUse() && 1627 isa<ConstantSDNode>(N0.getOperand(1))) { 1628 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 1629 uint64_t bestMask = 0; 1630 unsigned bestWidth = 0, bestOffset = 0; 1631 if (!Lod->isVolatile() && Lod->isUnindexed() && 1632 // FIXME: This uses getZExtValue() below so it only works on i64 and 1633 // below. 1634 N0.getValueType().getSizeInBits() <= 64) { 1635 unsigned origWidth = N0.getValueType().getSizeInBits(); 1636 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 1637 // 8 bits, but have to be careful... 
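 // ...in particular, the narrowed access must stay within the bytes of the
 // original load, and the byte offset of the narrowed piece depends on the
 // target's endianness (see the bestOffset computation below).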
1638 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 1639 origWidth = Lod->getMemoryVT().getSizeInBits(); 1640 uint64_t Mask =cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); 1641 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 1642 uint64_t newMask = (1ULL << width) - 1; 1643 for (unsigned offset=0; offset<origWidth/width; offset++) { 1644 if ((newMask & Mask) == Mask) { 1645 if (!TD->isLittleEndian()) 1646 bestOffset = (origWidth/width - offset - 1) * (width/8); 1647 else 1648 bestOffset = (uint64_t)offset * (width/8); 1649 bestMask = Mask >> (offset * (width/8) * 8); 1650 bestWidth = width; 1651 break; 1652 } 1653 newMask = newMask << width; 1654 } 1655 } 1656 } 1657 if (bestWidth) { 1658 EVT newVT = EVT::getIntegerVT(Context, bestWidth); 1659 if (newVT.isRound()) { 1660 EVT PtrType = Lod->getOperand(1).getValueType(); 1661 SDValue Ptr = Lod->getBasePtr(); 1662 if (bestOffset != 0) 1663 Ptr = DAG.getNode(ISD::ADD, dl, PtrType, Lod->getBasePtr(), 1664 DAG.getConstant(bestOffset, PtrType)); 1665 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 1666 SDValue NewLoad = DAG.getLoad(newVT, dl, Lod->getChain(), Ptr, 1667 Lod->getSrcValue(), 1668 Lod->getSrcValueOffset() + bestOffset, 1669 false, NewAlign); 1670 return DAG.getSetCC(dl, VT, 1671 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 1672 DAG.getConstant(bestMask, newVT)), 1673 DAG.getConstant(0LL, newVT), Cond); 1674 } 1675 } 1676 } 1677 1678 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 1679 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 1680 unsigned InSize = N0.getOperand(0).getValueType().getSizeInBits(); 1681 1682 // If the comparison constant has bits in the upper part, the 1683 // zero-extended value could never match. 1684 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 1685 C1.getBitWidth() - InSize))) { 1686 switch (Cond) { 1687 case ISD::SETUGT: 1688 case ISD::SETUGE: 1689 case ISD::SETEQ: return DAG.getConstant(0, VT); 1690 case ISD::SETULT: 1691 case ISD::SETULE: 1692 case ISD::SETNE: return DAG.getConstant(1, VT); 1693 case ISD::SETGT: 1694 case ISD::SETGE: 1695 // True if the sign bit of C1 is set. 1696 return DAG.getConstant(C1.isNegative(), VT); 1697 case ISD::SETLT: 1698 case ISD::SETLE: 1699 // True if the sign bit of C1 isn't set. 1700 return DAG.getConstant(C1.isNonNegative(), VT); 1701 default: 1702 break; 1703 } 1704 } 1705 1706 // Otherwise, we can perform the comparison with the low bits. 1707 switch (Cond) { 1708 case ISD::SETEQ: 1709 case ISD::SETNE: 1710 case ISD::SETUGT: 1711 case ISD::SETUGE: 1712 case ISD::SETULT: 1713 case ISD::SETULE: { 1714 EVT newVT = N0.getOperand(0).getValueType(); 1715 if (DCI.isBeforeLegalizeOps() || 1716 (isOperationLegal(ISD::SETCC, newVT) && 1717 getCondCodeAction(Cond, newVT)==Legal)) 1718 return DAG.getSetCC(dl, VT, N0.getOperand(0), 1719 DAG.getConstant(APInt(C1).trunc(InSize), newVT), 1720 Cond); 1721 break; 1722 } 1723 default: 1724 break; // todo, be more careful with signed comparisons 1725 } 1726 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 1727 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1728 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 1729 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 1730 EVT ExtDstTy = N0.getValueType(); 1731 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 1732 1733 // If the extended part has any inconsistent bits, it cannot ever 1734 // compare equal. In other words, they have to be all ones or all 1735 // zeros. 
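 // For example, with a sign_extend_inreg from i8 inside an i32, bits 8-31 of
 // the result are always copies of bit 7, so a constant such as 0x00000180
 // (bit 8 set, bits 9-31 clear) can never compare equal.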
1736 APInt ExtBits = 1737 APInt::getHighBitsSet(ExtDstTyBits, ExtDstTyBits - ExtSrcTyBits); 1738 if ((C1 & ExtBits) != 0 && (C1 & ExtBits) != ExtBits) 1739 return DAG.getConstant(Cond == ISD::SETNE, VT); 1740 1741 SDValue ZextOp; 1742 EVT Op0Ty = N0.getOperand(0).getValueType(); 1743 if (Op0Ty == ExtSrcTy) { 1744 ZextOp = N0.getOperand(0); 1745 } else { 1746 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 1747 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 1748 DAG.getConstant(Imm, Op0Ty)); 1749 } 1750 if (!DCI.isCalledByLegalizer()) 1751 DCI.AddToWorklist(ZextOp.getNode()); 1752 // Otherwise, make this a use of a zext. 1753 return DAG.getSetCC(dl, VT, ZextOp, 1754 DAG.getConstant(C1 & APInt::getLowBitsSet( 1755 ExtDstTyBits, 1756 ExtSrcTyBits), 1757 ExtDstTy), 1758 Cond); 1759 } else if ((N1C->isNullValue() || N1C->getAPIntValue() == 1) && 1760 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 1761 1762 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 1763 if (N0.getOpcode() == ISD::SETCC) { 1764 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (N1C->getZExtValue() != 1); 1765 if (TrueWhenTrue) 1766 return N0; 1767 1768 // Invert the condition. 1769 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 1770 CC = ISD::getSetCCInverse(CC, 1771 N0.getOperand(0).getValueType().isInteger()); 1772 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 1773 } 1774 1775 if ((N0.getOpcode() == ISD::XOR || 1776 (N0.getOpcode() == ISD::AND && 1777 N0.getOperand(0).getOpcode() == ISD::XOR && 1778 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 1779 isa<ConstantSDNode>(N0.getOperand(1)) && 1780 cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue() == 1) { 1781 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 1782 // can only do this if the top bits are known zero. 1783 unsigned BitWidth = N0.getValueSizeInBits(); 1784 if (DAG.MaskedValueIsZero(N0, 1785 APInt::getHighBitsSet(BitWidth, 1786 BitWidth-1))) { 1787 // Okay, get the un-inverted input value. 1788 SDValue Val; 1789 if (N0.getOpcode() == ISD::XOR) 1790 Val = N0.getOperand(0); 1791 else { 1792 assert(N0.getOpcode() == ISD::AND && 1793 N0.getOperand(0).getOpcode() == ISD::XOR); 1794 // ((X^1)&1)^1 -> X & 1 1795 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 1796 N0.getOperand(0).getOperand(0), 1797 N0.getOperand(1)); 1798 } 1799 return DAG.getSetCC(dl, VT, Val, N1, 1800 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 1801 } 1802 } 1803 } 1804 1805 APInt MinVal, MaxVal; 1806 unsigned OperandBitSize = N1C->getValueType(0).getSizeInBits(); 1807 if (ISD::isSignedIntSetCC(Cond)) { 1808 MinVal = APInt::getSignedMinValue(OperandBitSize); 1809 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 1810 } else { 1811 MinVal = APInt::getMinValue(OperandBitSize); 1812 MaxVal = APInt::getMaxValue(OperandBitSize); 1813 } 1814 1815 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 1816 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 1817 if (C1 == MinVal) return DAG.getConstant(1, VT); // X >= MIN --> true 1818 // X >= C0 --> X > (C0-1) 1819 return DAG.getSetCC(dl, VT, N0, 1820 DAG.getConstant(C1-1, N1.getValueType()), 1821 (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT); 1822 } 1823 1824 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 1825 if (C1 == MaxVal) return DAG.getConstant(1, VT); // X <= MAX --> true 1826 // X <= C0 --> X < (C0+1) 1827 return DAG.getSetCC(dl, VT, N0, 1828 DAG.getConstant(C1+1, N1.getValueType()), 1829 (Cond == ISD::SETLE) ? 
ISD::SETLT : ISD::SETULT); 1830 } 1831 1832 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal) 1833 return DAG.getConstant(0, VT); // X < MIN --> false 1834 if ((Cond == ISD::SETGE || Cond == ISD::SETUGE) && C1 == MinVal) 1835 return DAG.getConstant(1, VT); // X >= MIN --> true 1836 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal) 1837 return DAG.getConstant(0, VT); // X > MAX --> false 1838 if ((Cond == ISD::SETLE || Cond == ISD::SETULE) && C1 == MaxVal) 1839 return DAG.getConstant(1, VT); // X <= MAX --> true 1840 1841 // Canonicalize setgt X, Min --> setne X, Min 1842 if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MinVal) 1843 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 1844 // Canonicalize setlt X, Max --> setne X, Max 1845 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MaxVal) 1846 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 1847 1848 // If we have setult X, 1, turn it into seteq X, 0 1849 if ((Cond == ISD::SETLT || Cond == ISD::SETULT) && C1 == MinVal+1) 1850 return DAG.getSetCC(dl, VT, N0, 1851 DAG.getConstant(MinVal, N0.getValueType()), 1852 ISD::SETEQ); 1853 // If we have setugt X, Max-1, turn it into seteq X, Max 1854 else if ((Cond == ISD::SETGT || Cond == ISD::SETUGT) && C1 == MaxVal-1) 1855 return DAG.getSetCC(dl, VT, N0, 1856 DAG.getConstant(MaxVal, N0.getValueType()), 1857 ISD::SETEQ); 1858 1859 // If we have "setcc X, C0", check to see if we can shrink the immediate 1860 // by changing cc. 1861 1862 // SETUGT X, SINTMAX -> SETLT X, 0 1863 if (Cond == ISD::SETUGT && 1864 C1 == APInt::getSignedMaxValue(OperandBitSize)) 1865 return DAG.getSetCC(dl, VT, N0, 1866 DAG.getConstant(0, N1.getValueType()), 1867 ISD::SETLT); 1868 1869 // SETULT X, SINTMIN -> SETGT X, -1 1870 if (Cond == ISD::SETULT && 1871 C1 == APInt::getSignedMinValue(OperandBitSize)) { 1872 SDValue ConstMinusOne = 1873 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), 1874 N1.getValueType()); 1875 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 1876 } 1877 1878 // Fold bit comparisons when we can. 1879 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1880 VT == N0.getValueType() && N0.getOpcode() == ISD::AND) 1881 if (ConstantSDNode *AndRHS = 1882 dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 1883 EVT ShiftTy = DCI.isBeforeLegalize() ? 1884 getPointerTy() : getShiftAmountTy(); 1885 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 1886 // Perform the xform if the AND RHS is a single bit. 1887 if (isPowerOf2_64(AndRHS->getZExtValue())) { 1888 return DAG.getNode(ISD::SRL, dl, VT, N0, 1889 DAG.getConstant(Log2_64(AndRHS->getZExtValue()), 1890 ShiftTy)); 1891 } 1892 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getZExtValue()) { 1893 // (X & 8) == 8 --> (X & 8) >> 3 1894 // Perform the xform if C1 is a single bit. 1895 if (C1.isPowerOf2()) { 1896 return DAG.getNode(ISD::SRL, dl, VT, N0, 1897 DAG.getConstant(C1.logBase2(), ShiftTy)); 1898 } 1899 } 1900 } 1901 } 1902 1903 if (isa<ConstantFPSDNode>(N0.getNode())) { 1904 // Constant fold or commute setcc. 1905 SDValue O = DAG.FoldSetCC(VT, N0, N1, Cond, dl); 1906 if (O.getNode()) return O; 1907 } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.getNode())) { 1908 // If the RHS of an FP comparison is a constant, simplify it away in 1909 // some cases. 1910 if (CFP->getValueAPF().isNaN()) { 1911 // If an operand is known to be a nan, we can fold it. 
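 // getUnorderedFlavor classifies the predicate: flavor 0 is known false when
 // an operand is a NaN, flavor 1 is known true, and flavor 2 is undefined on
 // NaNs (see the cases below).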
 1912      switch (ISD::getUnorderedFlavor(Cond)) {
 1913      default: llvm_unreachable("Unknown flavor!");
 1914      case 0:  // Known false.
 1915        return DAG.getConstant(0, VT);
 1916      case 1:  // Known true.
 1917        return DAG.getConstant(1, VT);
 1918      case 2:  // Undefined.
 1919        return DAG.getUNDEF(VT);
 1920      }
 1921    }
 1922
 1923    // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
 1924    // constant if knowing that the operand is non-nan is enough. We prefer to
 1925    // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
 1926    // materialize 0.0.
 1927    if (Cond == ISD::SETO || Cond == ISD::SETUO)
 1928      return DAG.getSetCC(dl, VT, N0, N0, Cond);
 1929
 1930    // If the condition is not legal, see if we can find an equivalent one
 1931    // which is legal.
 1932    if (!isCondCodeLegal(Cond, N0.getValueType())) {
 1933      // If the comparison was an awkward floating-point == or != and one of
 1934      // the comparison operands is infinity or negative infinity, convert the
 1935      // condition to a less-awkward <= or >=.  Check that the condition we
 1936      // actually return is legal before returning it.
 1937      if (CFP->getValueAPF().isInfinity()) {
 1938        if (CFP->getValueAPF().isNegative()) {
 1939          if (Cond == ISD::SETOEQ &&
 1940              isCondCodeLegal(ISD::SETOLE, N0.getValueType()))
 1941            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLE);
 1942          if (Cond == ISD::SETUEQ &&
 1943              isCondCodeLegal(ISD::SETULE, N0.getValueType()))
 1944            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULE);
 1945          if (Cond == ISD::SETUNE &&
 1946              isCondCodeLegal(ISD::SETUGT, N0.getValueType()))
 1947            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGT);
 1948          if (Cond == ISD::SETONE &&
 1949              isCondCodeLegal(ISD::SETOGT, N0.getValueType()))
 1950            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGT);
 1951        } else {
 1952          if (Cond == ISD::SETOEQ &&
 1953              isCondCodeLegal(ISD::SETOGE, N0.getValueType()))
 1954            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOGE);
 1955          if (Cond == ISD::SETUEQ &&
 1956              isCondCodeLegal(ISD::SETUGE, N0.getValueType()))
 1957            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETUGE);
 1958          if (Cond == ISD::SETUNE &&
 1959              isCondCodeLegal(ISD::SETULT, N0.getValueType()))
 1960            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETULT);
 1961          if (Cond == ISD::SETONE &&
 1962              isCondCodeLegal(ISD::SETOLT, N0.getValueType()))
 1963            return DAG.getSetCC(dl, VT, N0, N1, ISD::SETOLT);
 1964        }
 1965      }
 1966    }
 1967  }
 1968
 1969  if (N0 == N1) {
 1970    // We can always fold X == X for integer setcc's.
 1971    if (N0.getValueType().isInteger())
 1972      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
 1973    unsigned UOF = ISD::getUnorderedFlavor(Cond);
 1974    if (UOF == 2)   // FP operators that are undefined on NaNs.
 1975      return DAG.getConstant(ISD::isTrueWhenEqual(Cond), VT);
 1976    if (UOF == unsigned(ISD::isTrueWhenEqual(Cond)))
 1977      return DAG.getConstant(UOF, VT);
 1978    // Otherwise, we can't fold it.  However, we can simplify it to SETUO/SETO
 1979    // if it is not already.
 1980    ISD::CondCode NewCond = UOF == 0 ?
ISD::SETO : ISD::SETUO; 1980 if (NewCond != Cond) 1981 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 1982 } 1983 1984 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 1985 N0.getValueType().isInteger()) { 1986 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 1987 N0.getOpcode() == ISD::XOR) { 1988 // Simplify (X+Y) == (X+Z) --> Y == Z 1989 if (N0.getOpcode() == N1.getOpcode()) { 1990 if (N0.getOperand(0) == N1.getOperand(0)) 1991 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 1992 if (N0.getOperand(1) == N1.getOperand(1)) 1993 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 1994 if (DAG.isCommutativeBinOp(N0.getOpcode())) { 1995 // If X op Y == Y op X, try other combinations. 1996 if (N0.getOperand(0) == N1.getOperand(1)) 1997 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 1998 Cond); 1999 if (N0.getOperand(1) == N1.getOperand(0)) 2000 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 2001 Cond); 2002 } 2003 } 2004 2005 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(N1)) { 2006 if (ConstantSDNode *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 2007 // Turn (X+C1) == C2 --> X == C2-C1 2008 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 2009 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2010 DAG.getConstant(RHSC->getAPIntValue()- 2011 LHSR->getAPIntValue(), 2012 N0.getValueType()), Cond); 2013 } 2014 2015 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 2016 if (N0.getOpcode() == ISD::XOR) 2017 // If we know that all of the inverted bits are zero, don't bother 2018 // performing the inversion. 2019 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 2020 return 2021 DAG.getSetCC(dl, VT, N0.getOperand(0), 2022 DAG.getConstant(LHSR->getAPIntValue() ^ 2023 RHSC->getAPIntValue(), 2024 N0.getValueType()), 2025 Cond); 2026 } 2027 2028 // Turn (C1-X) == C2 --> X == C1-C2 2029 if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 2030 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 2031 return 2032 DAG.getSetCC(dl, VT, N0.getOperand(1), 2033 DAG.getConstant(SUBC->getAPIntValue() - 2034 RHSC->getAPIntValue(), 2035 N0.getValueType()), 2036 Cond); 2037 } 2038 } 2039 } 2040 2041 // Simplify (X+Z) == X --> Z == 0 2042 if (N0.getOperand(0) == N1) 2043 return DAG.getSetCC(dl, VT, N0.getOperand(1), 2044 DAG.getConstant(0, N0.getValueType()), Cond); 2045 if (N0.getOperand(1) == N1) { 2046 if (DAG.isCommutativeBinOp(N0.getOpcode())) 2047 return DAG.getSetCC(dl, VT, N0.getOperand(0), 2048 DAG.getConstant(0, N0.getValueType()), Cond); 2049 else if (N0.getNode()->hasOneUse()) { 2050 assert(N0.getOpcode() == ISD::SUB && "Unexpected operation!"); 2051 // (Z-X) == X --> Z == X<<1 2052 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), 2053 N1, 2054 DAG.getConstant(1, getShiftAmountTy())); 2055 if (!DCI.isCalledByLegalizer()) 2056 DCI.AddToWorklist(SH.getNode()); 2057 return DAG.getSetCC(dl, VT, N0.getOperand(0), SH, Cond); 2058 } 2059 } 2060 } 2061 2062 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 2063 N1.getOpcode() == ISD::XOR) { 2064 // Simplify X == (X+Z) --> Z == 0 2065 if (N1.getOperand(0) == N0) { 2066 return DAG.getSetCC(dl, VT, N1.getOperand(1), 2067 DAG.getConstant(0, N1.getValueType()), Cond); 2068 } else if (N1.getOperand(1) == N0) { 2069 if (DAG.isCommutativeBinOp(N1.getOpcode())) { 2070 return DAG.getSetCC(dl, VT, N1.getOperand(0), 2071 DAG.getConstant(0, N1.getValueType()), Cond); 2072 } else if 
(N1.getNode()->hasOneUse()) { 2073 assert(N1.getOpcode() == ISD::SUB && "Unexpected operation!"); 2074 // X == (Z-X) --> X<<1 == Z 2075 SDValue SH = DAG.getNode(ISD::SHL, dl, N1.getValueType(), N0, 2076 DAG.getConstant(1, getShiftAmountTy())); 2077 if (!DCI.isCalledByLegalizer()) 2078 DCI.AddToWorklist(SH.getNode()); 2079 return DAG.getSetCC(dl, VT, SH, N1.getOperand(0), Cond); 2080 } 2081 } 2082 } 2083 2084 // Simplify x&y == y to x&y != 0 if y has exactly one bit set. 2085 // Note that where y is variable and is known to have at most 2086 // one bit set (for example, if it is z&1) we cannot do this; 2087 // the expressions are not equivalent when y==0. 2088 if (N0.getOpcode() == ISD::AND) 2089 if (N0.getOperand(0) == N1 || N0.getOperand(1) == N1) { 2090 if (ValueHasExactlyOneBitSet(N1, DAG)) { 2091 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2092 SDValue Zero = DAG.getConstant(0, N1.getValueType()); 2093 return DAG.getSetCC(dl, VT, N0, Zero, Cond); 2094 } 2095 } 2096 if (N1.getOpcode() == ISD::AND) 2097 if (N1.getOperand(0) == N0 || N1.getOperand(1) == N0) { 2098 if (ValueHasExactlyOneBitSet(N0, DAG)) { 2099 Cond = ISD::getSetCCInverse(Cond, /*isInteger=*/true); 2100 SDValue Zero = DAG.getConstant(0, N0.getValueType()); 2101 return DAG.getSetCC(dl, VT, N1, Zero, Cond); 2102 } 2103 } 2104 } 2105 2106 // Fold away ALL boolean setcc's. 2107 SDValue Temp; 2108 if (N0.getValueType() == MVT::i1 && foldBooleans) { 2109 switch (Cond) { 2110 default: llvm_unreachable("Unknown integer setcc!"); 2111 case ISD::SETEQ: // X == Y -> ~(X^Y) 2112 Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2113 N0 = DAG.getNOT(dl, Temp, MVT::i1); 2114 if (!DCI.isCalledByLegalizer()) 2115 DCI.AddToWorklist(Temp.getNode()); 2116 break; 2117 case ISD::SETNE: // X != Y --> (X^Y) 2118 N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1); 2119 break; 2120 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 2121 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 2122 Temp = DAG.getNOT(dl, N0, MVT::i1); 2123 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp); 2124 if (!DCI.isCalledByLegalizer()) 2125 DCI.AddToWorklist(Temp.getNode()); 2126 break; 2127 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 2128 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 2129 Temp = DAG.getNOT(dl, N1, MVT::i1); 2130 N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp); 2131 if (!DCI.isCalledByLegalizer()) 2132 DCI.AddToWorklist(Temp.getNode()); 2133 break; 2134 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 2135 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 2136 Temp = DAG.getNOT(dl, N0, MVT::i1); 2137 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp); 2138 if (!DCI.isCalledByLegalizer()) 2139 DCI.AddToWorklist(Temp.getNode()); 2140 break; 2141 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 2142 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 2143 Temp = DAG.getNOT(dl, N1, MVT::i1); 2144 N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp); 2145 break; 2146 } 2147 if (VT != MVT::i1) { 2148 if (!DCI.isCalledByLegalizer()) 2149 DCI.AddToWorklist(N0.getNode()); 2150 // FIXME: If running after legalize, we probably can't do this. 2151 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, N0); 2152 } 2153 return N0; 2154 } 2155 2156 // Could not fold it. 2157 return SDValue(); 2158} 2159 2160/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the 2161/// node is a GlobalAddress + offset. 
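/// The offset is accumulated into Offset rather than overwriting it, so
/// callers should initialize Offset (typically to zero) before the first call.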
2162bool TargetLowering::isGAPlusOffset(SDNode *N, GlobalValue* &GA, 2163 int64_t &Offset) const { 2164 if (isa<GlobalAddressSDNode>(N)) { 2165 GlobalAddressSDNode *GASD = cast<GlobalAddressSDNode>(N); 2166 GA = GASD->getGlobal(); 2167 Offset += GASD->getOffset(); 2168 return true; 2169 } 2170 2171 if (N->getOpcode() == ISD::ADD) { 2172 SDValue N1 = N->getOperand(0); 2173 SDValue N2 = N->getOperand(1); 2174 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 2175 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2); 2176 if (V) { 2177 Offset += V->getSExtValue(); 2178 return true; 2179 } 2180 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 2181 ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1); 2182 if (V) { 2183 Offset += V->getSExtValue(); 2184 return true; 2185 } 2186 } 2187 } 2188 return false; 2189} 2190 2191 2192/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a 2193/// location that is 'Dist' units away from the location that the 'Base' load 2194/// is loading from. 2195bool TargetLowering::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base, 2196 unsigned Bytes, int Dist, 2197 const MachineFrameInfo *MFI) const { 2198 if (LD->getChain() != Base->getChain()) 2199 return false; 2200 EVT VT = LD->getValueType(0); 2201 if (VT.getSizeInBits() / 8 != Bytes) 2202 return false; 2203 2204 SDValue Loc = LD->getOperand(1); 2205 SDValue BaseLoc = Base->getOperand(1); 2206 if (Loc.getOpcode() == ISD::FrameIndex) { 2207 if (BaseLoc.getOpcode() != ISD::FrameIndex) 2208 return false; 2209 int FI = cast<FrameIndexSDNode>(Loc)->getIndex(); 2210 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex(); 2211 int FS = MFI->getObjectSize(FI); 2212 int BFS = MFI->getObjectSize(BFI); 2213 if (FS != BFS || FS != (int)Bytes) return false; 2214 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes); 2215 } 2216 if (Loc.getOpcode() == ISD::ADD && Loc.getOperand(0) == BaseLoc) { 2217 ConstantSDNode *V = dyn_cast<ConstantSDNode>(Loc.getOperand(1)); 2218 if (V && (V->getSExtValue() == Dist*Bytes)) 2219 return true; 2220 } 2221 2222 GlobalValue *GV1 = NULL; 2223 GlobalValue *GV2 = NULL; 2224 int64_t Offset1 = 0; 2225 int64_t Offset2 = 0; 2226 bool isGA1 = isGAPlusOffset(Loc.getNode(), GV1, Offset1); 2227 bool isGA2 = isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2); 2228 if (isGA1 && isGA2 && GV1 == GV2) 2229 return Offset1 == (Offset2 + Dist*Bytes); 2230 return false; 2231} 2232 2233 2234SDValue TargetLowering:: 2235PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { 2236 // Default implementation: no optimization. 2237 return SDValue(); 2238} 2239 2240//===----------------------------------------------------------------------===// 2241// Inline Assembler Implementation Methods 2242//===----------------------------------------------------------------------===// 2243 2244 2245TargetLowering::ConstraintType 2246TargetLowering::getConstraintType(const std::string &Constraint) const { 2247 // FIXME: lots more standard ones to handle. 2248 if (Constraint.size() == 1) { 2249 switch (Constraint[0]) { 2250 default: break; 2251 case 'r': return C_RegisterClass; 2252 case 'm': // memory 2253 case 'o': // offsetable 2254 case 'V': // not offsetable 2255 return C_Memory; 2256 case 'i': // Simple Integer or Relocatable Constant 2257 case 'n': // Simple Integer 2258 case 's': // Relocatable Constant 2259 case 'X': // Allow ANY value. 2260 case 'I': // Target registers. 
2261 case 'J': 2262 case 'K': 2263 case 'L': 2264 case 'M': 2265 case 'N': 2266 case 'O': 2267 case 'P': 2268 return C_Other; 2269 } 2270 } 2271 2272 if (Constraint.size() > 1 && Constraint[0] == '{' && 2273 Constraint[Constraint.size()-1] == '}') 2274 return C_Register; 2275 return C_Unknown; 2276} 2277 2278/// LowerXConstraint - try to replace an X constraint, which matches anything, 2279/// with another that has more specific requirements based on the type of the 2280/// corresponding operand. 2281const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const{ 2282 if (ConstraintVT.isInteger()) 2283 return "r"; 2284 if (ConstraintVT.isFloatingPoint()) 2285 return "f"; // works for many targets 2286 return 0; 2287} 2288 2289/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 2290/// vector. If it is invalid, don't add anything to Ops. 2291void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 2292 char ConstraintLetter, 2293 bool hasMemory, 2294 std::vector<SDValue> &Ops, 2295 SelectionDAG &DAG) const { 2296 switch (ConstraintLetter) { 2297 default: break; 2298 case 'X': // Allows any operand; labels (basic block) use this. 2299 if (Op.getOpcode() == ISD::BasicBlock) { 2300 Ops.push_back(Op); 2301 return; 2302 } 2303 // fall through 2304 case 'i': // Simple Integer or Relocatable Constant 2305 case 'n': // Simple Integer 2306 case 's': { // Relocatable Constant 2307 // These operands are interested in values of the form (GV+C), where C may 2308 // be folded in as an offset of GV, or it may be explicitly added. Also, it 2309 // is possible and fine if either GV or C are missing. 2310 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); 2311 GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op); 2312 2313 // If we have "(add GV, C)", pull out GV/C 2314 if (Op.getOpcode() == ISD::ADD) { 2315 C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 2316 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0)); 2317 if (C == 0 || GA == 0) { 2318 C = dyn_cast<ConstantSDNode>(Op.getOperand(0)); 2319 GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1)); 2320 } 2321 if (C == 0 || GA == 0) 2322 C = 0, GA = 0; 2323 } 2324 2325 // If we find a valid operand, map to the TargetXXX version so that the 2326 // value itself doesn't get selected. 2327 if (GA) { // Either &GV or &GV+C 2328 if (ConstraintLetter != 'n') { 2329 int64_t Offs = GA->getOffset(); 2330 if (C) Offs += C->getZExtValue(); 2331 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), 2332 Op.getValueType(), Offs)); 2333 return; 2334 } 2335 } 2336 if (C) { // just C, no GV. 2337 // Simple constants are not allowed for 's'. 2338 if (ConstraintLetter != 's') { 2339 // gcc prints these as sign extended. Sign extend value to 64 bits 2340 // now; without this it would get ZExt'd later in 2341 // ScheduleDAGSDNodes::EmitNode, which is very generic. 
 2342        Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
 2343                                            MVT::i64));
 2344        return;
 2345      }
 2346    }
 2347    break;
 2348  }
 2349  }
 2350}
 2351
 2352std::vector<unsigned> TargetLowering::
 2353getRegClassForInlineAsmConstraint(const std::string &Constraint,
 2354                                  EVT VT) const {
 2355  return std::vector<unsigned>();
 2356}
 2357
 2358
 2359std::pair<unsigned, const TargetRegisterClass*> TargetLowering::
 2360getRegForInlineAsmConstraint(const std::string &Constraint,
 2361                             EVT VT) const {
 2362  if (Constraint[0] != '{')
 2363    return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
 2364  assert(*(Constraint.end()-1) == '}' && "Not a brace enclosed constraint?");
 2365
 2366  // Remove the braces from around the name.
 2367  StringRef RegName(Constraint.data()+1, Constraint.size()-2);
 2368
 2369  // Figure out which register class contains this reg.
 2370  const TargetRegisterInfo *RI = TM.getRegisterInfo();
 2371  for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
 2372       E = RI->regclass_end(); RCI != E; ++RCI) {
 2373    const TargetRegisterClass *RC = *RCI;
 2374
 2375    // If none of the value types for this register class are valid, we
 2376    // can't use it.  For example, 64-bit reg classes on 32-bit targets.
 2377    bool isLegal = false;
 2378    for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
 2379         I != E; ++I) {
 2380      if (isTypeLegal(*I)) {
 2381        isLegal = true;
 2382        break;
 2383      }
 2384    }
 2385
 2386    if (!isLegal) continue;
 2387
 2388    for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
 2389         I != E; ++I) {
 2390      if (RegName.equals_lower(RI->getName(*I)))
 2391        return std::make_pair(*I, RC);
 2392    }
 2393  }
 2394
 2395  return std::pair<unsigned, const TargetRegisterClass*>(0, 0);
 2396}
 2397
 2398//===----------------------------------------------------------------------===//
 2399// Constraint Selection.
 2400
 2401/// isMatchingInputConstraint - Return true if this is an input operand that is
 2402/// a matching constraint like "4".
 2403bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const {
 2404  assert(!ConstraintCode.empty() && "No known constraint!");
 2405  return isdigit(ConstraintCode[0]);
 2406}
 2407
 2408/// getMatchedOperand - If this is an input matching constraint, this method
 2409/// returns the output operand it matches.
 2410unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const {
 2411  assert(!ConstraintCode.empty() && "No known constraint!");
 2412  return atoi(ConstraintCode.c_str());
 2413}
 2414
 2415
 2416/// getConstraintGenerality - Return an integer indicating how general CT
 2417/// is.
 2418static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
 2419  switch (CT) {
 2420  default: llvm_unreachable("Unknown constraint type!");
 2421  case TargetLowering::C_Other:
 2422  case TargetLowering::C_Unknown:
 2423    return 0;
 2424  case TargetLowering::C_Register:
 2425    return 1;
 2426  case TargetLowering::C_RegisterClass:
 2427    return 2;
 2428  case TargetLowering::C_Memory:
 2429    return 3;
 2430  }
 2431}
 2432
 2433/// ChooseConstraint - If there are multiple different constraints that we
 2434/// could pick for this operand (e.g. "imr") try to pick the 'best' one.
 2435/// This is somewhat tricky: constraints fall into four classes:
 2436///    Other         -> immediates and magic values
 2437///    Register      -> one specific register
 2438///    RegisterClass -> a group of regs
 2439///    Memory        -> memory
 2440/// Ideally, we would pick the most specific constraint possible: if we have
 2441/// something that fits into a register, we would pick it.
The problem here 2442/// is that if we have something that could either be in a register or in 2443/// memory that use of the register could cause selection of *other* 2444/// operands to fail: they might only succeed if we pick memory. Because of 2445/// this the heuristic we use is: 2446/// 2447/// 1) If there is an 'other' constraint, and if the operand is valid for 2448/// that constraint, use it. This makes us take advantage of 'i' 2449/// constraints when available. 2450/// 2) Otherwise, pick the most general constraint present. This prefers 2451/// 'm' over 'r', for example. 2452/// 2453static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 2454 bool hasMemory, const TargetLowering &TLI, 2455 SDValue Op, SelectionDAG *DAG) { 2456 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 2457 unsigned BestIdx = 0; 2458 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 2459 int BestGenerality = -1; 2460 2461 // Loop over the options, keeping track of the most general one. 2462 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 2463 TargetLowering::ConstraintType CType = 2464 TLI.getConstraintType(OpInfo.Codes[i]); 2465 2466 // If this is an 'other' constraint, see if the operand is valid for it. 2467 // For example, on X86 we might have an 'rI' constraint. If the operand 2468 // is an integer in the range [0..31] we want to use I (saving a load 2469 // of a register), otherwise we must use 'r'. 2470 if (CType == TargetLowering::C_Other && Op.getNode()) { 2471 assert(OpInfo.Codes[i].size() == 1 && 2472 "Unhandled multi-letter 'other' constraint"); 2473 std::vector<SDValue> ResultOps; 2474 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i][0], hasMemory, 2475 ResultOps, *DAG); 2476 if (!ResultOps.empty()) { 2477 BestType = CType; 2478 BestIdx = i; 2479 break; 2480 } 2481 } 2482 2483 // This constraint letter is more general than the previous one, use it. 2484 int Generality = getConstraintGenerality(CType); 2485 if (Generality > BestGenerality) { 2486 BestType = CType; 2487 BestIdx = i; 2488 BestGenerality = Generality; 2489 } 2490 } 2491 2492 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 2493 OpInfo.ConstraintType = BestType; 2494} 2495 2496/// ComputeConstraintToUse - Determines the constraint code and constraint 2497/// type to use for the specific AsmOperandInfo, setting 2498/// OpInfo.ConstraintCode and OpInfo.ConstraintType. 2499void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 2500 SDValue Op, 2501 bool hasMemory, 2502 SelectionDAG *DAG) const { 2503 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 2504 2505 // Single-letter constraints ('r') are very common. 2506 if (OpInfo.Codes.size() == 1) { 2507 OpInfo.ConstraintCode = OpInfo.Codes[0]; 2508 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 2509 } else { 2510 ChooseConstraint(OpInfo, hasMemory, *this, Op, DAG); 2511 } 2512 2513 // 'X' matches anything. 2514 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 2515 // Labels and constants are handled elsewhere ('X' is the only thing 2516 // that matches labels). For Functions, the type here is the type of 2517 // the result, which is not what we want to look at; leave them alone. 
 2518    Value *v = OpInfo.CallOperandVal;
 2519    if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) {
 2520      OpInfo.CallOperandVal = v;
 2521      return;
 2522    }
 2523
 2524    // Otherwise, try to resolve it to something we know about by looking at
 2525    // the actual operand type.
 2526    if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) {
 2527      OpInfo.ConstraintCode = Repl;
 2528      OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode);
 2529    }
 2530  }
 2531}
 2532
 2533//===----------------------------------------------------------------------===//
 2534//  Loop Strength Reduction hooks
 2535//===----------------------------------------------------------------------===//
 2536
 2537/// isLegalAddressingMode - Return true if the addressing mode represented
 2538/// by AM is legal for this target, for a load/store of the specified type.
 2539bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
 2540                                           const Type *Ty) const {
 2541  // The default implementation of this implements a conservative RISC-like
 2542  // r+r and r+i addressing mode.
 2543
 2544  // Allows a sign-extended 16-bit immediate field.
 2545  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
 2546    return false;
 2547
 2548  // No global is ever allowed as a base.
 2549  if (AM.BaseGV)
 2550    return false;
 2551
 2552  // Only support r+r,
 2553  switch (AM.Scale) {
 2554  case 0:  // "r+i" or just "i", depending on HasBaseReg.
 2555    break;
 2556  case 1:
 2557    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
 2558      return false;
 2559    // Otherwise we have r+r or r+i.
 2560    break;
 2561  case 2:
 2562    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
 2563      return false;
 2564    // Allow 2*r as r+r.
 2565    break;
 2566  }
 2567
 2568  return true;
 2569}
 2570
 2571/// BuildSDIV - Given an ISD::SDIV node expressing a divide by constant,
 2572/// return a DAG expression to select that will generate the same value by
 2573/// multiplying by a magic number.  See:
 2574/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
 2575SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG,
 2576                                  std::vector<SDNode*>* Created) const {
 2577  EVT VT = N->getValueType(0);
 2578  DebugLoc dl = N->getDebugLoc();
 2579
 2580  // Check to see if we can do this.
 2581  // FIXME: We should be more aggressive here.
 2582  if (!isTypeLegal(VT))
 2583    return SDValue();
 2584
 2585  APInt d = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
 2586  APInt::ms magics = d.magic();
 2587
 2588  // Multiply the numerator (operand 0) by the magic value
 2589  // FIXME: We should support doing a MUL in a wider type
 2590  SDValue Q;
 2591  if (isOperationLegalOrCustom(ISD::MULHS, VT))
 2592    Q = DAG.getNode(ISD::MULHS, dl, VT, N->getOperand(0),
 2593                    DAG.getConstant(magics.m, VT));
 2594  else if (isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
 2595    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT),
 2596                            N->getOperand(0),
 2597                            DAG.getConstant(magics.m, VT)).getNode(), 1);
 2598  else
 2599    return SDValue();       // No mulhs or equivalent
 2600  // If d > 0 and m < 0, add the numerator
 2601  if (d.isStrictlyPositive() && magics.m.isNegative()) {
 2602    Q = DAG.getNode(ISD::ADD, dl, VT, Q, N->getOperand(0));
 2603    if (Created)
 2604      Created->push_back(Q.getNode());
 2605  }
 2606  // If d < 0 and m > 0, subtract the numerator.
 2607  if (d.isNegative() && magics.m.isStrictlyPositive()) {
 2608    Q = DAG.getNode(ISD::SUB, dl, VT, Q, N->getOperand(0));
 2609    if (Created)
 2610      Created->push_back(Q.getNode());
 2611  }
 2612  // Shift right algebraic if shift value is nonzero
 2613  if (magics.s > 0) {
 2614    Q = DAG.getNode(ISD::SRA, dl, VT, Q,
 2615                    DAG.getConstant(magics.s, getShiftAmountTy()));
 2616    if (Created)
 2617      Created->push_back(Q.getNode());
 2618  }
 2619  // Extract the sign bit and add it to the quotient
 2620  SDValue T =
 2621    DAG.getNode(ISD::SRL, dl, VT, Q, DAG.getConstant(VT.getSizeInBits()-1,
 2622                                                     getShiftAmountTy()));
 2623  if (Created)
 2624    Created->push_back(T.getNode());
 2625  return DAG.getNode(ISD::ADD, dl, VT, Q, T);
 2626}
 2627
 2628/// BuildUDIV - Given an ISD::UDIV node expressing a divide by constant,
 2629/// return a DAG expression to select that will generate the same value by
 2630/// multiplying by a magic number.  See:
 2631/// <http://the.wall.riscom.net/books/proc/ppc/cwg/code2.html>
 2632SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG,
 2633                                  std::vector<SDNode*>* Created) const {
 2634  EVT VT = N->getValueType(0);
 2635  DebugLoc dl = N->getDebugLoc();
 2636
 2637  // Check to see if we can do this.
 2638  // FIXME: We should be more aggressive here.
 2639  if (!isTypeLegal(VT))
 2640    return SDValue();
 2641
 2642  // FIXME: We should use a narrower constant when the upper
 2643  // bits are known to be zero.
 2644  ConstantSDNode *N1C = cast<ConstantSDNode>(N->getOperand(1));
 2645  APInt::mu magics = N1C->getAPIntValue().magicu();
 2646
 2647  // Multiply the numerator (operand 0) by the magic value
 2648  // FIXME: We should support doing a MUL in a wider type
 2649  SDValue Q;
 2650  if (isOperationLegalOrCustom(ISD::MULHU, VT))
 2651    Q = DAG.getNode(ISD::MULHU, dl, VT, N->getOperand(0),
 2652                    DAG.getConstant(magics.m, VT));
 2653  else if (isOperationLegalOrCustom(ISD::UMUL_LOHI, VT))
 2654    Q = SDValue(DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT),
 2655                            N->getOperand(0),
 2656                            DAG.getConstant(magics.m, VT)).getNode(), 1);
 2657  else
 2658    return SDValue();       // No mulhu or equivalent
 2659  if (Created)
 2660    Created->push_back(Q.getNode());
 2661
 2662  if (magics.a == 0) {
 2663    assert(magics.s < N1C->getAPIntValue().getBitWidth() &&
 2664           "We shouldn't generate an undefined shift!");
 2665    return DAG.getNode(ISD::SRL, dl, VT, Q,
 2666                       DAG.getConstant(magics.s, getShiftAmountTy()));
 2667  } else {
 2668    SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N->getOperand(0), Q);
 2669    if (Created)
 2670      Created->push_back(NPQ.getNode());
 2671    NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ,
 2672                      DAG.getConstant(1, getShiftAmountTy()));
 2673    if (Created)
 2674      Created->push_back(NPQ.getNode());
 2675    NPQ = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q);
 2676    if (Created)
 2677      Created->push_back(NPQ.getNode());
 2678    return DAG.getNode(ISD::SRL, dl, VT, NPQ,
 2679                       DAG.getConstant(magics.s-1, getShiftAmountTy()));
 2680  }
 2681}
 2682
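// Worked example for the BuildUDIV expansion above (a sketch for illustration
// only; the constants are the standard Hacker's Delight values for a 32-bit
// unsigned divide by 7): magicu() yields m = 0x24924925 with a != 0 and s = 3,
// so the emitted sequence is roughly
//   Q   = mulhu(N, 0x24924925)
//   NPQ = ((N - Q) >> 1) + Q        // logical shifts
//   Res = NPQ >> (3 - 1)
// which computes N / 7 for every 32-bit unsigned N without a divide.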