// CGBuiltin.cpp, revision 199482
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  return RValue::get(CGF.Builder.CreateCall2(AtomF,
                                             CGF.EmitScalarExpr(E->getArg(0)),
                                             CGF.EmitScalarExpr(E->getArg(1))));
}
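
// Illustrative note (not in the original source): for an i32 object,
//   __sync_fetch_and_add(&x, 1)
// is expected to reach this helper and emit roughly
//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %x, i32 1)
// where %old is the value memory held *before* the addition.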

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  const llvm::Type *ResType[2];
  ResType[0] = CGF.ConvertType(E->getType());
  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);

  if (Id == Intrinsic::atomic_load_nand)
    Result = CGF.Builder.CreateNot(Result);

  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}
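
// Illustrative sketch (not in the original source): the atomic intrinsic
// returns the value memory held before the operation, so this helper
// re-applies Op to reconstruct the "and_fetch" result; e.g. for
// __sync_add_and_fetch(&x, v):
//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %x, i32 %v)
//   %new = add i32 %old, %v    ; value returned to the caller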

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext())) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(VMContext,
                                                Result.Val.getInt()));
    else if (Result.Val.isFloat())
      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
  }
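
  // For example (illustrative): __builtin_clz(16) on a 32-bit int folds to
  // the constant 27 here, so no ctlz intrinsic call is ever emitted.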

  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
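
  // Illustrative note: the lowering above is a branchless abs,
  //   %neg     = sub i32 0, %x
  //   %abscond = icmp sge i32 %x, 0
  //   %abs     = select i1 %abscond, i32 %x, i32 %neg
  // i.e. abs(x) = x >= 0 ? x : -x.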
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
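
  // Worked example for the expansion above: for x = 8 (0b1000),
  // cttz(8) = 3, so ffs(8) = 3 + 1 = 4; the select forces the result to 0
  // when x == 0, the one case ffs defines specially.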
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
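
  // E.g. x = 0b1011 has ctpop(x) = 3 set bits, and 3 & 1 == 1, so
  // parity(0b1011) == 1: an odd number of one bits.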
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect:
    // FIXME: pass expect through to LLVM
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
#if 1
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1))));
#else
    // FIXME: Remove after testing.
    llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext());
    const llvm::Type *ResType = ConvertType(E->getType());
    //    bool UseSubObject = TypeArg.getZExtValue() & 1;
    bool UseMinimum = TypeArg.getZExtValue() & 2;
    return RValue::get(
      llvm::ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
#endif
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
  }
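
  // The defaults above mirror GCC's documentation: __builtin_prefetch(p)
  // behaves as __builtin_prefetch(p, /*rw=*/0, /*locality=*/3), i.e. a read
  // prefetch with maximal temporal locality.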
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    Value *V = Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
    return RValue::get(V);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
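
  // The lowering above relies on NaN being the only value that compares
  // unordered with itself: "fcmp uno %v, %v" is true iff %v is NaN, so
  // e.g. __builtin_isnan(0.0 / 0.0) yields 1.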
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    // FIXME: LLVM IR should allow alloca with an i64 size!
    Value *Size = EmitScalarExpr(E->getArg(0));
    Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
  }
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                        EmitScalarExpr(E->getArg(1)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
                        EmitScalarExpr(E->getArg(1)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Builder.CreateCall4(CGM.getMemSetFn(), Address,
                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                            llvm::Type::getInt8Ty(VMContext)),
                        EmitScalarExpr(E->getArg(2)),
                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
    return RValue::get(Address);
  }
  case Builtin::BI__builtin_return_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0))));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    // FIXME: There should be a target hook for this.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
#if 0
  // FIXME: Finish/enable when LLVM backend support stabilizes.
  case Builtin::BI__builtin_setjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    // Store the frame pointer to the buffer.
    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    Value *FrameAddr =
        Builder.CreateCall(FrameAddrF,
                           Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
    Builder.CreateStore(FrameAddr, Buf);
    // Call the setjmp intrinsic.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
    Value *Buf = EmitScalarExpr(E->getArg(0));
    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
    Buf = Builder.CreateBitCast(Buf, DestType);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
#endif
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
                                llvm::Instruction::And);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getType());
    ResType[1] = ConvertType(E->getArg(0)->getType());
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    return RValue::get(Builder.CreateCall3(AtomF,
                                           EmitScalarExpr(E->getArg(0)),
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  {
    const llvm::Type *ResType[2];
    ResType[0] = ConvertType(E->getArg(1)->getType());
    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
    Value *OldVal = EmitScalarExpr(E->getArg(1));
    Value *PrevVal = Builder.CreateCall3(AtomF,
                                        EmitScalarExpr(E->getArg(0)),
                                        OldVal,
                                        EmitScalarExpr(E->getArg(2)));
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
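
  // Illustrative IR for the bool variant above (names approximate): the
  // intrinsic returns what memory previously held, and the boolean result
  // is whether that matched the expected old value:
  //   %prev = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %p, i32 %old,
  //                                                    i32 %new)
  //   %ok   = icmp eq i32 %prev, %old
  //   %res  = zext i1 %ok to i32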

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true);
    return RValue::get(0);
  }
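
  // Note: the trailing 'true' makes this a volatile store, so e.g.
  // __sync_lock_release(&flag) on an i32 becomes (illustrative)
  //   volatile store i32 0, i32* %flag
  // which the optimizer will not delete.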

  case Builtin::BI__sync_synchronize: {
    Value *C[5];
    C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
    C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }
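
  // Per the LLVM 2.x LangRef, llvm.memory.barrier takes five i1 flags
  // (load-load, load-store, store-load, store-store, device); passing true
  // for the first four and false for device, as above, requests a full
  // barrier over ordinary memory.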

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Rewrite sqrt to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  }

  // If this is an alias for a libm function (e.g. __builtin_sin), turn it
  // into a call to that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(CGM.getBuiltinLibFunction(FD, BuiltinID),
                    E->getCallee()->getType(), E->arg_begin(),
                    E->arg_end());
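
  // For example (illustrative): __builtin_sin(x), absent special handling
  // above, simply becomes a call to the libm function:
  //   %r = call double @sin(double %x)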

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue = EmitScalarExpr(E->getArg(i));

      // If the intrinsic arg type is different from the builtin arg type,
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin; for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  switch (BuiltinID) {
  default: return 0;
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
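
  // Summary of the case above (illustrative): the immediate-count shift
  // builtins are lowered to the variable-count SSE2 intrinsics, so the
  // scalar count is zero-extended to i64, inserted into element 0 of a
  // <2 x i64>, and bitcast to the operand's vector type, roughly
  //   psll.d(v, insertelement(<2 x i64> undef, i64 zext(n), 0))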
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                             Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);

    // Cast the value to v2i64.
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // Extract element 0 (storelps) or element 1 (storehps).
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // Cast the pointer to i64* and store.
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  }
}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  switch (BuiltinID) {
  default: return 0;
  }
}
825