CGBuiltin.cpp revision 193631
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  return RValue::get(CGF.Builder.CreateCall2(AtomF,
                                             CGF.EmitScalarExpr(E->getArg(0)),
                                             CGF.EmitScalarExpr(E->getArg(1))));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);

  if (Id == Intrinsic::atomic_load_nand)
    Result = CGF.Builder.CreateNot(Result);

  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}

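// EmitBuiltinExpr proceeds in stages: constant-fold the call if possible,
// handle builtins that need custom IR in the switch below, then fall back to
// an ordinary library call, a target intrinsic looked up by builtin name, and
// finally the per-target hook (EmitTargetBuiltinExpr).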
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(Result.Val.getInt()));
    else if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType =
      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getNameStart());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type =
      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
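  // The bit-counting builtins below all follow the same pattern: call the
  // corresponding overloaded LLVM intrinsic (llvm.cttz/ctlz/ctpop) on the
  // argument and, if needed, cast the result back to the builtin's return
  // type, since the intrinsic result has the width of its operand.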
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect:
    // FIXME: pass expect through to LLVM
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
    // FIXME: Implement. For now we just always fail and pretend we
    // don't know the object size.
    llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
    const llvm::Type *ResType = ConvertType(E->getType());
    // bool UseSubObject = TypeArg.getZExtValue() & 1;
    bool UseMinimum = TypeArg.getZExtValue() & 2;
    return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
  }
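  // __builtin_prefetch(addr [, rw [, locality]]) maps onto llvm.prefetch.
  // When the optional arguments are omitted, the defaults chosen below
  // (rw = 0, i.e. a read, and locality = 3, maximum temporal locality)
  // follow the GCC documentation for this builtin.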
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      ConstantInt::get(llvm::Type::Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      ConstantInt::get(llvm::Type::Int32Ty, 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    // FIXME: LLVM IR should allow alloca with an i64 size!
    Value *Size = EmitScalarExpr(E->getArg(0));
    Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp");
    return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp"));
  }
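  // The block memory builtins are emitted as calls to the llvm.memset /
  // llvm.memcpy / llvm.memmove intrinsics; the trailing i32 constant 1 in
  // each call is the alignment operand, kept at the conservative minimum.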
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        llvm::ConstantInt::get(llvm::Type::Int8Ty, 0),
                        EmitScalarExpr(E->getArg(1)),
                        llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                            llvm::Type::Int8Ty),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_return_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    // FIXME: There should be a target hook for this
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
#if 0
  // FIXME: Finish/enable when LLVM backend support stabilizes
  case Builtin::BI__builtin_setjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    // Store the frame pointer to the buffer
    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    Value *FrameAddr =
      Builder.CreateCall(FrameAddrF,
                         Constant::getNullValue(llvm::Type::Int32Ty));
    Builder.CreateStore(FrameAddr, Buf);
    // Call the setjmp intrinsic
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
    const llvm::Type *DestType =
      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
    Value *Buf = EmitScalarExpr(E->getArg(0));
    const llvm::Type *DestType =
      llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
#endif
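  // The generic __sync_* forms are resolved by Sema to the size-suffixed
  // variants (hence the assert below); each suffixed variant is lowered to
  // the matching overloaded llvm.atomic.* intrinsic via EmitBinaryAtomic.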
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

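  // The *_and_fetch builtins return the value after the operation, while the
  // llvm.atomic.load.* intrinsics return the value that was in memory before
  // it; EmitBinaryAtomicPost therefore re-applies the operation to the value
  // the intrinsic returned.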
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
                                llvm::Instruction::And);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    return RValue::get(Builder.CreateCall3(AtomF,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

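  // __sync_bool_compare_and_swap uses the same llvm.atomic.cmp.swap intrinsic
  // as the _val_ form, but reports success by comparing the returned previous
  // value against the expected value and zero-extending the resulting i1.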
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
                                         EmitScalarExpr(E->getArg(0)),
                                         OldVal,
                                         EmitScalarExpr(E->getArg(2)));
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true);
    return RValue::get(0);
  }

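  // __sync_synchronize becomes a full llvm.memory.barrier: at this revision
  // the intrinsic takes four i1 ordering flags plus a device flag, so all
  // four orderings are requested and the device flag is left clear.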
  case Builtin::BI__sync_synchronize: {
    Value *C[5];
    C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1);
    C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Rewrite sqrt to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
  // that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
                    E->getCallee()->getType(), E->arg_begin(),
                    E->arg_end());

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID =
    Intrinsic::getIntrinsicForGCCBuiltin(Target.getTargetPrefix(), Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue = EmitScalarExpr(E->getArg(i));

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::VoidTy;
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
  return RValue::get(UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  const char *TargetPrefix = Target.getTargetPrefix();
  if (strcmp(TargetPrefix, "x86") == 0)
    return EmitX86BuiltinExpr(BuiltinID, E);
  else if (strcmp(TargetPrefix, "ppc") == 0)
    return EmitPPCBuiltinExpr(BuiltinID, E);
  return 0;
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {

  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  switch (BuiltinID) {
  default: return 0;
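  // For the SSE2 shift-by-immediate builtins (the *i128 forms) the scalar
  // shift count is zero-extended to i64, inserted into element 0 of a
  // <2 x i64>, bitcast to the operand's vector type, and then passed to the
  // corresponding x86_sse2_ps* intrinsic, which takes its count as a vector.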
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
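  // The MMX shift-by-immediate builtins are handled the same way, except the
  // count is reinterpreted as a <1 x i64> to match the x86_mmx_ps* intrinsics.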
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                             Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    const llvm::Type *EltTy = llvm::Type::Int64Ty;
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);

    // cast val v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract (0, 1)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_loadlv4si: {
    // load i64
    const llvm::Type *EltTy = llvm::Type::Int64Ty;
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    Ops[0] = Builder.CreateLoad(Ops[0], "load");

    // scalar to vector: insert i64 into 2 x i64 undef
    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(llvm::UndefValue::get(VecTy),
                                         Ops[0], Zero, "s2v");

    // shuffle into zero vector.
    std::vector<llvm::Constant*> Elts;
    Elts.resize(2, llvm::ConstantInt::get(EltTy, 0));
    llvm::Value *ZV = ConstantVector::get(Elts);
    Ops[0] = EmitShuffleVector(ZV, Ops[0], 2, 1, "loadl");

    // bitcast to result.
    return Builder.CreateBitCast(Ops[0],
                                 llvm::VectorType::get(llvm::Type::Int32Ty, 4));
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  switch (BuiltinID) {
  default: return 0;
  }
}