//===---- CGObjC.cpp - Emit LLVM Code for Objective-C --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/DataLayout.h"
#include "llvm/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
                                      QualType ET,
                                      const ObjCMethodDecl *Method,
                                      RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(llvm::Value *addr) {
  llvm::Type *type =
    cast<llvm::PointerType>(addr->getType())->getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString());
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  const Expr *SubExpr = E->getSubExpr();
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  assert(BoxingMethod && "BoxingMethod is null");
  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(Builder, ClassDecl);

  const ParmVarDecl *argDecl = *BoxingMethod->param_begin();
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  RValue RV = EmitAnyExpr(SubExpr);
  CallArgList Args;
  Args.add(RV, ArgQT);

  RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                              BoxingMethod->getResultType(), Sel, Receiver, Args,
                                              ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}
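
// Illustrative note (not part of the original source): for a boxed literal
// such as '@42', Sema records a boxing method like +[NSNumber numberWithInt:],
// so the routine above effectively lowers the expression to a class message,
//
//   [NSNumber numberWithInt:42]
//
// with the evaluated subexpression passed as the single argument.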

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = 0;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Compute the type of the array we're initializing.
  uint64_t NumElements =
    ALE ? ALE->getNumElements() : DLE->getNumElements();
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects");
  llvm::Value *Keys = 0;
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the initializer.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
                                   ElementType,
                                   Context.getTypeAlignInChars(Rhs->getType()),
                                   Context);
      EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
    } else {
      // Emit the key initializer.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
                                      ElementType,
                                    Context.getTypeAlignInChars(Key->getType()),
                                      Context);
      EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);

      // Emit the value initializer.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
                                        ElementType,
                                  Context.getTypeAlignInChars(Value->getType()),
                                        Context);
      EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(Builder, Class);

  // Generate the message send.
  RValue result
    = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  MethodWithObjects->getResultType(),
                                  Sel,
                                  Receiver, Args, Class,
                                  MethodWithObjects);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}
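
// Illustrative note (not part of the original source): both literal forms
// funnel through the routine above.  Assuming the usual Foundation methods
// selected by Sema, '@[a, b]' is emitted roughly as
//
//   id objs[2] = { a, b };
//   [NSArray arrayWithObjects:objs count:2]
//
// and '@{k: v}' as
//
//   id objs[1] = { v };  id keys[1] = { k };
//   [NSDictionary dictionaryWithObjects:objs forKeys:keys count:1]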

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
                                            const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector().  Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
}

/// \brief Adjust the type of the result of an Objective-C message send
/// expression when the method has a related result type.
static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
                                      QualType ExpT,
                                      const ObjCMethodDecl *Method,
                                      RValue Result) {
  if (!Method)
    return Result;

  if (!Method->hasRelatedResultType() ||
      CGF.getContext().hasSameType(ExpT, Method->getResultType()) ||
      !Result.isScalar())
    return Result;

  // We have applied a related result type. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               CGF.ConvertType(ExpT)));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();
    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}
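
// Illustrative note (not part of the original source): given something like
//
//   const char *c = [[self name] UTF8String];
//
// where -UTF8String returns an interior pointer, ARC may need to keep the
// temporary NSString alive past the call.  The predicate above answers "true"
// in that case, and the caller then retains+autoreleases the receiver.  A
// receiver loaded from an ivar, a field, or a local marked
// __attribute__((objc_precise_lifetime)) is treated as having precise
// lifetime, so no extension is emitted for it.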

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes.  We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out.  We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = 0;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = 0;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                   E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(Builder, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType =
    method ? method->getResultType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());

  // For delegate init calls in ARC, do an unsafe store of null into
  // self.  This represents the call taking direct ownership of that
  // value.  We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    llvm::Value *selfAddr =
      LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
    assert(selfAddr && "no self entry for a delegate init call?");

    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                         E->getSelector(),
                                         Receiver, Args, OID,
                                         method);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self.  This takes ownership of the value.
  if (isDelegateInit) {
    llvm::Value *selfAddr =
      LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy =
      cast<llvm::PointerType>(selfAddr->getType())->getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustRelatedResultType(*this, E->getType(), method, result);
}
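
// Illustrative note (not part of the original source): the delegate-init path
// above corresponds to ARC code of the form
//
//   self = [super init];    // or  self = [self initWithFoo:...];
//
// Before the send, null is stored into the 'self' slot because the call
// consumes the old value; after the send, the result is bitcast back to
// self's type and stored into the slot, taking ownership of the returned
// object.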

namespace {
struct FinishARCDealloc : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD,
                                      SourceLocation StartLoc) {
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (!OMD->hasAttr<NoDebugAttr>())
    maybeInitializeDebugInfo();

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
         E = OMD->param_end(); PI != E; ++PI)
    args.push_back(*PI);

  CurGD = OMD;

  StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}
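
// Illustrative note (not part of the original source): the formal parameter
// list built above always starts with the two implicit Objective-C arguments,
// so a method declared as
//
//   - (void)setName:(NSString *)name;
//
// is emitted as a C function taking roughly (id self, SEL _cmd, NSString *name).
// In ARC, a method named 'dealloc' additionally gets a cleanup that sends
// [super dealloc] as the frame unwinds.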

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method.  An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
  EmitStmt(OMD->getBody());
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  llvm::Value *src =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
                          ivar, 0).getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Context.VoidTy, args,
                                                      FunctionType::ExtInfo(),
                                                      RequiredArgs::All),
               fn, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses.  They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86.  (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  llvm::tie(IvarSize, IvarAlignment)
    = CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong.  This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though;  we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty.  However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions.  This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers.  This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up.  We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getContext().getTargetInfo().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}
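
// Illustrative note (not part of the original source): a few common property
// declarations and the strategies the constructor above picks for them:
//
//   @property (nonatomic, assign) int x;     -> Expression (plain ivar access)
//   @property (atomic, assign) double d;     -> Native, provided the ivar's size
//                                               and alignment permit an atomic
//                                               access on the target
//   @property (copy) NSString *s;            -> GetSetProperty
//   @property (atomic) struct Big b;         -> CopyStruct (too large for a
//                                               native atomic access)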

/// \brief Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
    GenerateObjCAtomicGetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction();
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value.  We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
  CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                        CGF.LoadObjCSelf(), ivar, 0).getAddress();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Value *copyCppAtomicObjectFn =
  CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
  CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
                                                      args,
                                                      FunctionType::ExtInfo(),
                                                      RequiredArgs::All),
               copyCppAtomicObjectFn, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
                     /*nrvo*/ 0);
      EmitReturnStmt(ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue,
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
      llvm::Type::getIntNTy(getLLVMContext(),
                            getContext().toBits(strategy.getIvarSize()));
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load.  This does not impose ordering constraints.
    llvm::Value *ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAlignment(strategy.getIvarAlignment().getQuantity());
    load->setAtomic(llvm::Unordered);

    // Store that value into the return address.  Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::Value *getPropertyFn =
      CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
      Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    RValue RV = EmitCall(getTypes().arrangeFreeFunctionCall(propType, args,
                                                       FunctionType::ExtInfo(),
                                                            RequiredArgs::All),
                         getPropertyFn, ReturnValueSlot(), args);

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
           getTypes().ConvertType(getterMethod->getResultType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    if (ivarType->isAnyComplexType()) {
      ComplexPairTy pair = LoadComplexFromAddr(LV.getAddress(),
                                               LV.isVolatileQualified());
      StoreComplexToAddr(pair, ReturnValue, LV.isVolatileQualified());
    } else if (hasAggregateLLVMType(ivarType)) {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
    } else {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          value = emitARCRetainLoadOfScalar(*this, LV, ivarType);

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(value, ConvertType(propType));
        value = Builder.CreateBitCast(value,
                  ConvertType(GetterMethodDecl->getResultType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
    }
    return;
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}
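
// Illustrative note (not part of the original source): for the GetSetProperty
// strategy the getter body above reduces to a single runtime call whose C
// signature in the Apple runtime is
//
//   id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic);
//
// while the Native strategy turns an atomic scalar getter into one unordered
// atomic load of the ivar followed by a store into the return slot.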

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
    .getAddress();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
    CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
                                                      args,
                                                      FunctionType::ExtInfo(),
                                                      RequiredArgs::All),
               copyStructFn, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                          CGF.LoadObjCSelf(), ivar, 0).getAddress();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Value *copyCppAtomicObjectFn =
    CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
  CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(CGF.getContext().VoidTy,
                                                      args,
                                                      FunctionType::ExtInfo(),
                                                      RequiredArgs::All),
               copyCppAtomicObjectFn, ReturnValueSlot(), args);
}

static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}
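
// Illustrative note (not part of the original source): the "optimized setter"
// entry points referred to below are the specialized runtime functions such
// as objc_setProperty_nonatomic / objc_setProperty_atomic (and their _copy
// variants), which became available with the OS X 10.8 / iOS 6.0 runtimes and
// avoid passing the atomic/copy flags on every call.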

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];

    LValue ivarLValue =
      EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    llvm::Value *ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
      llvm::Type::getIntNTy(getLLVMContext(),
                            getContext().toBits(strategy.getIvarSize()));
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store.  There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAlignment(strategy.getIvarAlignment().getQuantity());
    store->setAtomic(llvm::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::Value *setOptimizedPropertyFn = 0;
    llvm::Value *setPropertyFn = 0;
    if (UseOptimizedSetter(CGM)) {
      // Deployment target is OS X 10.8 / iOS 6.0 or later, and GC is off.
      setOptimizedPropertyFn =
        CGM.getObjCRuntime()
           .GetOptimizedPropertySetFunction(strategy.isAtomic(),
                                            strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
      Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
    llvm::Value *self =
      Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);
    llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
    arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
                                                  FunctionType::ExtInfo(),
                                                  RequiredArgs::All),
               setOptimizedPropertyFn, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      EmitCall(getTypes().arrangeFreeFunctionCall(getContext().VoidTy, args,
                                                  FunctionType::ExtInfo(),
                                                  RequiredArgs::All),
               setPropertyFn, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit-cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;

  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), false);
  EmitStmt(&assign);
}
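
// Illustrative note (not part of the original source): for a simple synthesized
// property such as
//
//   @property (nonatomic, strong) NSString *name;   // @synthesize name = _name;
//
// the Expression strategy above fabricates the AST for 'self->_name = name'
// (bit-casting the RHS if the property and ivar types differ) and emits it as
// an ordinary assignment; under ARC this lowers to storing the new value into
// the ivar via objc_storeStrong.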

/// \brief Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
    GenerateObjCAtomicSetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction();
}

namespace {
  struct DestroyIvar : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      llvm::Value *addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = 0;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}
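
// Illustrative note (not part of the original source): a class with
// non-trivially destructible ivars, e.g.
//
//   @interface Widget : NSObject { std::string title; }
//   @end
//
// gets an implicit .cxx_destruct method.  The loop above pushes one cleanup
// per such ivar, and the cleanups run when the scope exits; ARC __strong
// object ivars are released via objc_storeStrong(&ivar, nil) rather than a
// plain release, for the benefit of the tools.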
1312
1313void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1314                                                 ObjCMethodDecl *MD,
1315                                                 bool ctor) {
1316  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
1317  StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
1318
1319  // Emit .cxx_construct.
1320  if (ctor) {
1321    // Suppress the final autorelease in ARC.
1322    AutoreleaseResult = false;
1323
1324    SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
1325    for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
1326           E = IMP->init_end(); B != E; ++B) {
1327      CXXCtorInitializer *IvarInit = (*B);
1328      FieldDecl *Field = IvarInit->getAnyMember();
1329      ObjCIvarDecl  *Ivar = cast<ObjCIvarDecl>(Field);
1330      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
1331                                    LoadObjCSelf(), Ivar, 0);
1332      EmitAggExpr(IvarInit->getInit(),
1333                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
1334                                          AggValueSlot::DoesNotNeedGCBarriers,
1335                                          AggValueSlot::IsNotAliased));
1336    }
1337    // constructor returns 'self'.
1338    CodeGenTypes &Types = CGM.getTypes();
1339    QualType IdTy(CGM.getContext().getObjCIdType());
1340    llvm::Value *SelfAsId =
1341      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
1342    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
1343
1344  // Emit .cxx_destruct.
1345  } else {
1346    emitCXXDestructMethod(*this, IMP);
1347  }
1348  FinishFunction();
1349}
1350
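// Illustrative shape of the two generated methods (assumed example; the real
// output follows the code above):
//
//   - (id) .cxx_construct { <run each ivar initializer in place>; return self; }
//   - (void) .cxx_destruct { <run the ivar cleanups from emitCXXDestructMethod> }
//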
1351bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
1352  CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
1353  it++; it++;
1354  const ABIArgInfo &AI = it->info;
1355  // FIXME: Is this a sufficient check?
1356  return (AI.getKind() == ABIArgInfo::Indirect);
1357}
1358
1359bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
1360  if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
1361    return false;
1362  if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
1363    return FDTTy->getDecl()->hasObjectMember();
1364  return false;
1365}
1366
1367llvm::Value *CodeGenFunction::LoadObjCSelf() {
1368  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1369  return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
1370}
1371
1372QualType CodeGenFunction::TypeOfSelfObject() {
1373  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
1374  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
1375  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
1376    getContext().getCanonicalType(selfDecl->getType()));
1377  return PTy->getPointeeType();
1378}
1379
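// Emits an Objective-C fast enumeration (for-in) statement.  Roughly, the
// generated code has the following shape (block names match the labels
// created below; the ARC retain/release of the collection is elided):
//
//   count = [collection countByEnumeratingWithState:&state objects:buf count:16];
//   if (count == 0) goto forcoll.empty;
// forcoll.loopinit:
//   remember *state.mutationsPtr;
// forcoll.loopbody:
//   if (*state.mutationsPtr has changed) call the enumeration-mutation function;
//   element = state.itemsPtr[index]; run the loop body;
// forcoll.next:
//   if (++index < count) goto forcoll.loopbody;
// forcoll.refetch:
//   count = [collection countByEnumeratingWithState:...]; index = 0;
//   if (count != 0) goto forcoll.loopbody;
// forcoll.empty:
//   if the element was not a declared variable, store null into it;
//   fall through to forcoll.end.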
1380void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
1381  llvm::Constant *EnumerationMutationFn =
1382    CGM.getObjCRuntime().EnumerationMutationFunction();
1383
1384  if (!EnumerationMutationFn) {
1385    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
1386    return;
1387  }
1388
1389  CGDebugInfo *DI = getDebugInfo();
1390  if (DI)
1391    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
1392
1393  // The local variable comes into scope immediately.
1394  AutoVarEmission variable = AutoVarEmission::invalid();
1395  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
1396    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
1397
1398  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
1399
1400  // Fast enumeration state.
1401  QualType StateTy = CGM.getObjCFastEnumerationStateType();
1402  llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
1403  EmitNullInitialization(StatePtr, StateTy);
1404
1405  // Number of elements in the items array.
1406  static const unsigned NumItems = 16;
1407
1408  // Fetch the countByEnumeratingWithState:objects:count: selector.
1409  IdentifierInfo *II[] = {
1410    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
1411    &CGM.getContext().Idents.get("objects"),
1412    &CGM.getContext().Idents.get("count")
1413  };
1414  Selector FastEnumSel =
1415    CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
1416
1417  QualType ItemsTy =
1418    getContext().getConstantArrayType(getContext().getObjCIdType(),
1419                                      llvm::APInt(32, NumItems),
1420                                      ArrayType::Normal, 0);
1421  llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
1422
1423  // Emit the collection pointer.  In ARC, we do a retain.
1424  llvm::Value *Collection;
1425  if (getLangOpts().ObjCAutoRefCount) {
1426    Collection = EmitARCRetainScalarExpr(S.getCollection());
1427
1428    // Enter a cleanup to do the release.
1429    EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
1430  } else {
1431    Collection = EmitScalarExpr(S.getCollection());
1432  }
1433
1434  // The 'continue' label needs to appear within the cleanup for the
1435  // collection object.
1436  JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
1437
1438  // Send it our message:
1439  CallArgList Args;
1440
1441  // The first argument is a temporary of the enumeration-state type.
1442  Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
1443
1444  // The second argument is a temporary array with space for NumItems
1445  // pointers.  We'll actually be loading elements from the array
1446  // pointer written into the control state; this buffer is so that
1447  // collections that *aren't* backed by arrays can still queue up
1448  // batches of elements.
1449  Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
1450
1451  // The third argument is the capacity of that temporary array.
1452  llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
1453  llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
1454  Args.add(RValue::get(Count), getContext().UnsignedLongTy);
1455
1456  // Start the enumeration.
1457  RValue CountRV =
1458    CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1459                                             getContext().UnsignedLongTy,
1460                                             FastEnumSel,
1461                                             Collection, Args);
1462
1463  // The initial number of objects that were returned in the buffer.
1464  llvm::Value *initialBufferLimit = CountRV.getScalarVal();
1465
1466  llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
1467  llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
1468
1469  llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
1470
1471  // If the initial buffer count was zero to begin with, the collection is
1472  // empty; skip all this.
1473  Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
1474                       EmptyBB, LoopInitBB);
1475
1476  // Otherwise, initialize the loop.
1477  EmitBlock(LoopInitBB);
1478
1479  // Save the initial mutations value.  This is the value at an
1480  // address that was written into the state object by
1481  // countByEnumeratingWithState:objects:count:.
1482  llvm::Value *StateMutationsPtrPtr =
1483    Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
1484  llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
1485                                                      "mutationsptr");
1486
1487  llvm::Value *initialMutations =
1488    Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
1489
1490  // Start looping.  This is the point we return to whenever we have a
1491  // fresh, non-empty batch of objects.
1492  llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
1493  EmitBlock(LoopBodyBB);
1494
1495  // The current index into the buffer.
1496  llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
1497  index->addIncoming(zero, LoopInitBB);
1498
1499  // The current buffer size.
1500  llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
1501  count->addIncoming(initialBufferLimit, LoopInitBB);
1502
1503  // Check whether the mutations value has changed from where it was
1504  // at start.  StateMutationsPtr should actually be invariant between
1505  // refreshes.
1506  StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
1507  llvm::Value *currentMutations
1508    = Builder.CreateLoad(StateMutationsPtr, "statemutations");
1509
1510  llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
1511  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
1512
1513  Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
1514                       WasNotMutatedBB, WasMutatedBB);
1515
1516  // If so, call the enumeration-mutation function.
1517  EmitBlock(WasMutatedBB);
1518  llvm::Value *V =
1519    Builder.CreateBitCast(Collection,
1520                          ConvertType(getContext().getObjCIdType()));
1521  CallArgList Args2;
1522  Args2.add(RValue::get(V), getContext().getObjCIdType());
1523  // FIXME: We shouldn't need to get the function info here; the runtime
1524  // should already have computed it to build the function.
1525  EmitCall(CGM.getTypes().arrangeFreeFunctionCall(getContext().VoidTy, Args2,
1526                                                  FunctionType::ExtInfo(),
1527                                                  RequiredArgs::All),
1528           EnumerationMutationFn, ReturnValueSlot(), Args2);
1529
1530  // Otherwise, or if the mutation function returns, just continue.
1531  EmitBlock(WasNotMutatedBB);
1532
1533  // Initialize the element variable.
1534  RunCleanupsScope elementVariableScope(*this);
1535  bool elementIsVariable;
1536  LValue elementLValue;
1537  QualType elementType;
1538  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
1539    // Initialize the variable, in case it's a __block variable or something.
1540    EmitAutoVarInit(variable);
1541
1542    const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
1543    DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
1544                        VK_LValue, SourceLocation());
1545    elementLValue = EmitLValue(&tempDRE);
1546    elementType = D->getType();
1547    elementIsVariable = true;
1548
1549    if (D->isARCPseudoStrong())
1550      elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
1551  } else {
1552    elementLValue = LValue(); // suppress warning
1553    elementType = cast<Expr>(S.getElement())->getType();
1554    elementIsVariable = false;
1555  }
1556  llvm::Type *convertedElementType = ConvertType(elementType);
1557
1558  // Fetch the buffer out of the enumeration state.
1559  // TODO: this pointer should actually be invariant between
1560  // refreshes, which would help us do certain loop optimizations.
1561  llvm::Value *StateItemsPtr =
1562    Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
1563  llvm::Value *EnumStateItems =
1564    Builder.CreateLoad(StateItemsPtr, "stateitems");
1565
1566  // Fetch the value at the current index from the buffer.
1567  llvm::Value *CurrentItemPtr =
1568    Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
1569  llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
1570
1571  // Cast that value to the right type.
1572  CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
1573                                      "currentitem");
1574
1575  // Make sure we have an l-value.  Yes, this gets evaluated every
1576  // time through the loop.
1577  if (!elementIsVariable) {
1578    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1579    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
1580  } else {
1581    EmitScalarInit(CurrentItem, elementLValue);
1582  }
1583
1584  // If we do have an element variable, this assignment is the end of
1585  // its initialization.
1586  if (elementIsVariable)
1587    EmitAutoVarCleanups(variable);
1588
1589  // Perform the loop body, setting up break and continue labels.
1590  BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
1591  {
1592    RunCleanupsScope Scope(*this);
1593    EmitStmt(S.getBody());
1594  }
1595  BreakContinueStack.pop_back();
1596
1597  // Destroy the element variable now.
1598  elementVariableScope.ForceCleanup();
1599
1600  // Check whether there are more elements.
1601  EmitBlock(AfterBody.getBlock());
1602
1603  llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
1604
1605  // First we check in the local buffer.
1606  llvm::Value *indexPlusOne
1607    = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
1608
1609  // If we haven't overrun the buffer yet, we can continue.
1610  Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
1611                       LoopBodyBB, FetchMoreBB);
1612
1613  index->addIncoming(indexPlusOne, AfterBody.getBlock());
1614  count->addIncoming(count, AfterBody.getBlock());
1615
1616  // Otherwise, we have to fetch more elements.
1617  EmitBlock(FetchMoreBB);
1618
1619  CountRV =
1620    CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
1621                                             getContext().UnsignedLongTy,
1622                                             FastEnumSel,
1623                                             Collection, Args);
1624
1625  // If we got a zero count, we're done.
1626  llvm::Value *refetchCount = CountRV.getScalarVal();
1627
1628  // (note that the message send might split FetchMoreBB)
1629  index->addIncoming(zero, Builder.GetInsertBlock());
1630  count->addIncoming(refetchCount, Builder.GetInsertBlock());
1631
1632  Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
1633                       EmptyBB, LoopBodyBB);
1634
1635  // No more elements.
1636  EmitBlock(EmptyBB);
1637
1638  if (!elementIsVariable) {
1639    // If the element was not a declaration, set it to be null.
1640
1641    llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
1642    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
1643    EmitStoreThroughLValue(RValue::get(null), elementLValue);
1644  }
1645
1646  if (DI)
1647    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
1648
1649  // Leave the cleanup we entered in ARC.
1650  if (getLangOpts().ObjCAutoRefCount)
1651    PopCleanupBlock();
1652
1653  EmitBlock(LoopEnd.getBlock());
1654}
1655
1656void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
1657  CGM.getObjCRuntime().EmitTryStmt(*this, S);
1658}
1659
1660void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
1661  CGM.getObjCRuntime().EmitThrowStmt(*this, S);
1662}
1663
1664void CodeGenFunction::EmitObjCAtSynchronizedStmt(
1665                                              const ObjCAtSynchronizedStmt &S) {
1666  CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
1667}
1668
1669/// Produce the code for a CK_ARCProduceObject.  Just does a
1670/// primitive retain.
1671llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
1672                                                    llvm::Value *value) {
1673  return EmitARCRetain(type, value);
1674}
1675
1676namespace {
1677  struct CallObjCRelease : EHScopeStack::Cleanup {
1678    CallObjCRelease(llvm::Value *object) : object(object) {}
1679    llvm::Value *object;
1680
1681    void Emit(CodeGenFunction &CGF, Flags flags) {
1682      CGF.EmitARCRelease(object, /*precise*/ true);
1683    }
1684  };
1685}
1686
1687/// Produce the code for a CK_ARCConsumeObject.  Does a primitive
1688/// release at the end of the full-expression.
1689llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
1690                                                    llvm::Value *object) {
1691  // If we're in a conditional branch, we need to make the cleanup
1692  // conditional.
1693  pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
1694  return object;
1695}
1696
1697llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
1698                                                           llvm::Value *value) {
1699  return EmitARCRetainAutorelease(type, value);
1700}
1701
1702
1703static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
1704                                                llvm::FunctionType *type,
1705                                                StringRef fnName) {
1706  llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
1707
1708  // If the target runtime doesn't naturally support ARC, emit weak
1709  // references to the runtime support library.  We don't really
1710  // permit this to fail, but we need a particular relocation style.
1711  if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
1712    if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC())
1713      f->setLinkage(llvm::Function::ExternalWeakLinkage);
1714    // Set the nonlazybind attribute on these APIs for performance.
1715    if (fnName == "objc_retain" || fnName  == "objc_release")
1716      f->addFnAttr(llvm::Attributes::NonLazyBind);
1717  }
1718
1719  return fn;
1720}
1721
1722/// Perform an operation having the signature
1723///   i8* (i8*)
1724/// where a null input causes a no-op and returns null.
1725static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
1726                                          llvm::Value *value,
1727                                          llvm::Constant *&fn,
1728                                          StringRef fnName) {
1729  if (isa<llvm::ConstantPointerNull>(value)) return value;
1730
1731  if (!fn) {
1732    std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
1733    llvm::FunctionType *fnType =
1734      llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
1735    fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1736  }
1737
1738  // Cast the argument to 'id'.
1739  llvm::Type *origType = value->getType();
1740  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1741
1742  // Call the function.
1743  llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
1744  call->setDoesNotThrow();
1745
1746  // Cast the result back to the original type.
1747  return CGF.Builder.CreateBitCast(call, origType);
1748}
1749
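// For example, with fnName "objc_retain" the helper above emits roughly
//
//   %0 = bitcast %T* %value to i8*
//   %1 = call i8* @objc_retain(i8* %0)     ; marked nounwind
//   %2 = bitcast i8* %1 to %T*
//
// (the bitcasts fold away when the value already has type i8*).
//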
1750/// Perform an operation having the following signature:
1751///   i8* (i8**)
1752static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
1753                                         llvm::Value *addr,
1754                                         llvm::Constant *&fn,
1755                                         StringRef fnName) {
1756  if (!fn) {
1757    std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
1758    llvm::FunctionType *fnType =
1759      llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
1760    fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1761  }
1762
1763  // Cast the argument to 'id*'.
1764  llvm::Type *origType = addr->getType();
1765  addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1766
1767  // Call the function.
1768  llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
1769  call->setDoesNotThrow();
1770
1771  // Cast the result back to a dereference of the original type.
1772  llvm::Value *result = call;
1773  if (origType != CGF.Int8PtrPtrTy)
1774    result = CGF.Builder.CreateBitCast(result,
1775                        cast<llvm::PointerType>(origType)->getElementType());
1776
1777  return result;
1778}
1779
1780/// Perform an operation having the following signature:
1781///   i8* (i8**, i8*)
1782static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
1783                                          llvm::Value *addr,
1784                                          llvm::Value *value,
1785                                          llvm::Constant *&fn,
1786                                          StringRef fnName,
1787                                          bool ignored) {
1788  assert(cast<llvm::PointerType>(addr->getType())->getElementType()
1789           == value->getType());
1790
1791  if (!fn) {
1792    llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
1793
1794    llvm::FunctionType *fnType
1795      = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
1796    fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1797  }
1798
1799  llvm::Type *origType = value->getType();
1800
1801  addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
1802  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
1803
1804  llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
1805  result->setDoesNotThrow();
1806
1807  if (ignored) return 0;
1808
1809  return CGF.Builder.CreateBitCast(result, origType);
1810}
1811
1812/// Perform an operation having the following signature:
1813///   void (i8**, i8**)
1814static void emitARCCopyOperation(CodeGenFunction &CGF,
1815                                 llvm::Value *dst,
1816                                 llvm::Value *src,
1817                                 llvm::Constant *&fn,
1818                                 StringRef fnName) {
1819  assert(dst->getType() == src->getType());
1820
1821  if (!fn) {
1822    std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
1823    llvm::FunctionType *fnType
1824      = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
1825    fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
1826  }
1827
1828  dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
1829  src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
1830
1831  llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
1832  result->setDoesNotThrow();
1833}
1834
1835/// Produce the code to do a retain.  Based on the type, calls one of:
1836///   call i8* \@objc_retain(i8* %value)
1837///   call i8* \@objc_retainBlock(i8* %value)
1838llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
1839  if (type->isBlockPointerType())
1840    return EmitARCRetainBlock(value, /*mandatory*/ false);
1841  else
1842    return EmitARCRetainNonBlock(value);
1843}
1844
1845/// Retain the given object, with normal retain semantics.
1846///   call i8* \@objc_retain(i8* %value)
1847llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
1848  return emitARCValueOperation(*this, value,
1849                               CGM.getARCEntrypoints().objc_retain,
1850                               "objc_retain");
1851}
1852
1853/// Retain the given block, with _Block_copy semantics.
1854///   call i8* \@objc_retainBlock(i8* %value)
1855///
1856/// \param mandatory - If false, emit the call with metadata
1857/// indicating that it's okay for the optimizer to eliminate this call
1858/// if it can prove that the block never escapes except down the stack.
1859llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
1860                                                 bool mandatory) {
1861  llvm::Value *result
1862    = emitARCValueOperation(*this, value,
1863                            CGM.getARCEntrypoints().objc_retainBlock,
1864                            "objc_retainBlock");
1865
1866  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
1867  // tell the optimizer that it doesn't need to do this copy if the
1868  // block doesn't escape, where being passed as an argument doesn't
1869  // count as escaping.
1870  if (!mandatory && isa<llvm::Instruction>(result)) {
1871    llvm::CallInst *call
1872      = cast<llvm::CallInst>(result->stripPointerCasts());
1873    assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
1874
1875    SmallVector<llvm::Value*,1> args;
1876    call->setMetadata("clang.arc.copy_on_escape",
1877                      llvm::MDNode::get(Builder.getContext(), args));
1878  }
1879
1880  return result;
1881}
1882
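// For a non-mandatory copy, the call above therefore looks roughly like
//
//   %1 = call i8* @objc_retainBlock(i8* %0), !clang.arc.copy_on_escape !N
//
// where !N is an empty metadata node; the ARC optimizer may drop the copy
// entirely if the block provably never escapes.
//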
1883/// Retain the given object which is the result of a function call.
1884///   call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
1885///
1886/// Yes, this function name is one character away from a different
1887/// call with completely different semantics.
1888llvm::Value *
1889CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
1890  // Fetch the void(void) inline asm which marks that we're going to
1891  // retain the autoreleased return value.
1892  llvm::InlineAsm *&marker
1893    = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
1894  if (!marker) {
1895    StringRef assembly
1896      = CGM.getTargetCodeGenInfo()
1897           .getARCRetainAutoreleasedReturnValueMarker();
1898
1899    // If we have an empty assembly string, there's nothing to do.
1900    if (assembly.empty()) {
1901
1902    // Otherwise, at -O0, build an inline asm that we're going to call
1903    // in a moment.
1904    } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
1905      llvm::FunctionType *type =
1906        llvm::FunctionType::get(VoidTy, /*variadic*/false);
1907
1908      marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
1909
1910    // If we're at -O1 and above, we don't want to litter the code
1911    // with this marker yet, so leave a breadcrumb for the ARC
1912    // optimizer to pick up.
1913    } else {
1914      llvm::NamedMDNode *metadata =
1915        CGM.getModule().getOrInsertNamedMetadata(
1916                            "clang.arc.retainAutoreleasedReturnValueMarker");
1917      assert(metadata->getNumOperands() <= 1);
1918      if (metadata->getNumOperands() == 0) {
1919        llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
1920        metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
1921      }
1922    }
1923  }
1924
1925  // Call the marker asm if we made one, which we do only at -O0.
1926  if (marker) Builder.CreateCall(marker);
1927
1928  return emitARCValueOperation(*this, value,
1929                     CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
1930                               "objc_retainAutoreleasedReturnValue");
1931}
1932
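// At -O0 the code emitted around a message send therefore looks roughly like
// (the marker string itself is target-specific):
//
//   %call = call i8* @objc_msgSend(...)
//   call void asm sideeffect "<marker>", ""()
//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
//
// At -O1 and above no marker call is emitted; the marker string is instead
// recorded once in the clang.arc.retainAutoreleasedReturnValueMarker named
// metadata for the ARC optimizer to use.
//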
1933/// Release the given object.
1934///   call void \@objc_release(i8* %value)
1935void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
1936  if (isa<llvm::ConstantPointerNull>(value)) return;
1937
1938  llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
1939  if (!fn) {
1940    std::vector<llvm::Type*> args(1, Int8PtrTy);
1941    llvm::FunctionType *fnType =
1942      llvm::FunctionType::get(Builder.getVoidTy(), args, false);
1943    fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
1944  }
1945
1946  // Cast the argument to 'id'.
1947  value = Builder.CreateBitCast(value, Int8PtrTy);
1948
1949  // Call objc_release.
1950  llvm::CallInst *call = Builder.CreateCall(fn, value);
1951  call->setDoesNotThrow();
1952
1953  if (!precise) {
1954    SmallVector<llvm::Value*,1> args;
1955    call->setMetadata("clang.imprecise_release",
1956                      llvm::MDNode::get(Builder.getContext(), args));
1957  }
1958}
1959
1960/// Destroy a __strong variable.
1961///
1962/// At -O0, emit a call to store 'null' into the address;
1963/// instrumenting tools prefer this because the address is exposed,
1964/// but it's relatively cumbersome to optimize.
1965///
1966/// At -O1 and above, just load and call objc_release.
1967///
1968///   call void \@objc_storeStrong(i8** %addr, i8* null)
1969void CodeGenFunction::EmitARCDestroyStrong(llvm::Value *addr, bool precise) {
1970  if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
1971    llvm::PointerType *addrTy = cast<llvm::PointerType>(addr->getType());
1972    llvm::Value *null = llvm::ConstantPointerNull::get(
1973                          cast<llvm::PointerType>(addrTy->getElementType()));
1974    EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
1975    return;
1976  }
1977
1978  llvm::Value *value = Builder.CreateLoad(addr);
1979  EmitARCRelease(value, precise);
1980}
1981
1982/// Store into a strong object.  Always calls this:
1983///   call void \@objc_storeStrong(i8** %addr, i8* %value)
1984llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
1985                                                     llvm::Value *value,
1986                                                     bool ignored) {
1987  assert(cast<llvm::PointerType>(addr->getType())->getElementType()
1988           == value->getType());
1989
1990  llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
1991  if (!fn) {
1992    llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
1993    llvm::FunctionType *fnType
1994      = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
1995    fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
1996  }
1997
1998  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
1999  llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
2000
2001  Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
2002
2003  if (ignored) return 0;
2004  return value;
2005}
2006
2007/// Store into a strong object.  Sometimes calls this:
2008///   call void \@objc_storeStrong(i8** %addr, i8* %value)
2009/// Other times, breaks it down into components.
2010llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
2011                                                 llvm::Value *newValue,
2012                                                 bool ignored) {
2013  QualType type = dst.getType();
2014  bool isBlock = type->isBlockPointerType();
2015
2016  // Use a store barrier at -O0 unless this is a block type or the
2017  // lvalue is inadequately aligned.
2018  if (shouldUseFusedARCCalls() &&
2019      !isBlock &&
2020      (dst.getAlignment().isZero() ||
2021       dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
2022    return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
2023  }
2024
2025  // Otherwise, split it out.
2026
2027  // Retain the new value.
2028  newValue = EmitARCRetain(type, newValue);
2029
2030  // Read the old value.
2031  llvm::Value *oldValue = EmitLoadOfScalar(dst);
2032
2033  // Store.  We do this before the release so that any deallocs won't
2034  // see the old value.
2035  EmitStoreOfScalar(newValue, dst);
2036
2037  // Finally, release the old value.
2038  EmitARCRelease(oldValue, /*precise*/ false);
2039
2040  return newValue;
2041}
2042
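// When the fused objc_storeStrong call is not used, the expansion above
// amounts to roughly (ignoring the bitcasts between %T* and i8*):
//
//   %new = call i8* @objc_retain(i8* %newValue)   ; or objc_retainBlock
//   %old = load i8** %dst
//   store i8* %new, i8** %dst
//   call void @objc_release(i8* %old), !clang.imprecise_release !N
//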
2043/// Autorelease the given object.
2044///   call i8* \@objc_autorelease(i8* %value)
2045llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
2046  return emitARCValueOperation(*this, value,
2047                               CGM.getARCEntrypoints().objc_autorelease,
2048                               "objc_autorelease");
2049}
2050
2051/// Autorelease the given object.
2052///   call i8* \@objc_autoreleaseReturnValue(i8* %value)
2053llvm::Value *
2054CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
2055  return emitARCValueOperation(*this, value,
2056                            CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
2057                               "objc_autoreleaseReturnValue");
2058}
2059
2060/// Do a fused retain/autorelease of the given object.
2061///   call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
2062llvm::Value *
2063CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
2064  return emitARCValueOperation(*this, value,
2065                     CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
2066                               "objc_retainAutoreleaseReturnValue");
2067}
2068
2069/// Do a fused retain/autorelease of the given object.
2070///   call i8* \@objc_retainAutorelease(i8* %value)
2071/// or
2072///   %retain = call i8* \@objc_retainBlock(i8* %value)
2073///   call i8* \@objc_autorelease(i8* %retain)
2074llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
2075                                                       llvm::Value *value) {
2076  if (!type->isBlockPointerType())
2077    return EmitARCRetainAutoreleaseNonBlock(value);
2078
2079  if (isa<llvm::ConstantPointerNull>(value)) return value;
2080
2081  llvm::Type *origType = value->getType();
2082  value = Builder.CreateBitCast(value, Int8PtrTy);
2083  value = EmitARCRetainBlock(value, /*mandatory*/ true);
2084  value = EmitARCAutorelease(value);
2085  return Builder.CreateBitCast(value, origType);
2086}
2087
2088/// Do a fused retain/autorelease of the given object.
2089///   call i8* \@objc_retainAutorelease(i8* %value)
2090llvm::Value *
2091CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
2092  return emitARCValueOperation(*this, value,
2093                               CGM.getARCEntrypoints().objc_retainAutorelease,
2094                               "objc_retainAutorelease");
2095}
2096
2097/// i8* \@objc_loadWeak(i8** %addr)
2098/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
2099llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
2100  return emitARCLoadOperation(*this, addr,
2101                              CGM.getARCEntrypoints().objc_loadWeak,
2102                              "objc_loadWeak");
2103}
2104
2105/// i8* \@objc_loadWeakRetained(i8** %addr)
2106llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
2107  return emitARCLoadOperation(*this, addr,
2108                              CGM.getARCEntrypoints().objc_loadWeakRetained,
2109                              "objc_loadWeakRetained");
2110}
2111
2112/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
2113/// Returns %value.
2114llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
2115                                               llvm::Value *value,
2116                                               bool ignored) {
2117  return emitARCStoreOperation(*this, addr, value,
2118                               CGM.getARCEntrypoints().objc_storeWeak,
2119                               "objc_storeWeak", ignored);
2120}
2121
2122/// i8* \@objc_initWeak(i8** %addr, i8* %value)
2123/// Returns %value.  %addr is known to not have a current weak entry.
2124/// Essentially equivalent to:
2125///   *addr = nil; objc_storeWeak(addr, value);
2126void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
2127  // If we're initializing to null, just write null to memory; no need
2128  // to get the runtime involved.  But don't do this if optimization
2129  // is enabled, because accounting for this would make the optimizer
2130  // much more complicated.
2131  if (isa<llvm::ConstantPointerNull>(value) &&
2132      CGM.getCodeGenOpts().OptimizationLevel == 0) {
2133    Builder.CreateStore(value, addr);
2134    return;
2135  }
2136
2137  emitARCStoreOperation(*this, addr, value,
2138                        CGM.getARCEntrypoints().objc_initWeak,
2139                        "objc_initWeak", /*ignored*/ true);
2140}
2141
2142/// void \@objc_destroyWeak(i8** %addr)
2143/// Essentially objc_storeWeak(addr, nil).
2144void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
2145  llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
2146  if (!fn) {
2147    std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
2148    llvm::FunctionType *fnType =
2149      llvm::FunctionType::get(Builder.getVoidTy(), args, false);
2150    fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
2151  }
2152
2153  // Cast the argument to 'id*'.
2154  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
2155
2156  llvm::CallInst *call = Builder.CreateCall(fn, addr);
2157  call->setDoesNotThrow();
2158}
2159
2160/// void \@objc_moveWeak(i8** %dest, i8** %src)
2161/// Disregards the current value in %dest.  Leaves %src pointing to nothing.
2162/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
2163void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
2164  emitARCCopyOperation(*this, dst, src,
2165                       CGM.getARCEntrypoints().objc_moveWeak,
2166                       "objc_moveWeak");
2167}
2168
2169/// void \@objc_copyWeak(i8** %dest, i8** %src)
2170/// Disregards the current value in %dest.  Essentially
2171///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
2172void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
2173  emitARCCopyOperation(*this, dst, src,
2174                       CGM.getARCEntrypoints().objc_copyWeak,
2175                       "objc_copyWeak");
2176}
2177
2178/// Produce the code to do an objc_autoreleasepool_push.
2179///   call i8* \@objc_autoreleasePoolPush(void)
2180llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
2181  llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
2182  if (!fn) {
2183    llvm::FunctionType *fnType =
2184      llvm::FunctionType::get(Int8PtrTy, false);
2185    fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
2186  }
2187
2188  llvm::CallInst *call = Builder.CreateCall(fn);
2189  call->setDoesNotThrow();
2190
2191  return call;
2192}
2193
2194/// Produce the code to do a primitive autorelease-pool pop.
2195///   call void \@objc_autoreleasePoolPop(i8* %ptr)
2196void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
2197  assert(value->getType() == Int8PtrTy);
2198
2199  llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
2200  if (!fn) {
2201    std::vector<llvm::Type*> args(1, Int8PtrTy);
2202    llvm::FunctionType *fnType =
2203      llvm::FunctionType::get(Builder.getVoidTy(), args, false);
2204
2205    // We don't want to use a weak import here; instead we should not
2206    // fall into this path.
2207    fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
2208  }
2209
2210  llvm::CallInst *call = Builder.CreateCall(fn, value);
2211  call->setDoesNotThrow();
2212}
2213
2214/// Produce the code to do the MRR version of objc_autoreleasepool_push,
2215/// which is: [[NSAutoreleasePool alloc] init];
2216/// where alloc is declared as + (id)alloc in the NSAutoreleasePool class and
2217/// init is declared as - (id)init in its NSObject superclass.
2218///
2219llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
2220  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
2221  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
2222  // [NSAutoreleasePool alloc]
2223  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
2224  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
2225  CallArgList Args;
2226  RValue AllocRV =
2227    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2228                                getContext().getObjCIdType(),
2229                                AllocSel, Receiver, Args);
2230
2231  // [Receiver init]
2232  Receiver = AllocRV.getScalarVal();
2233  II = &CGM.getContext().Idents.get("init");
2234  Selector InitSel = getContext().Selectors.getSelector(0, &II);
2235  RValue InitRV =
2236    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
2237                                getContext().getObjCIdType(),
2238                                InitSel, Receiver, Args);
2239  return InitRV.getScalarVal();
2240}
2241
2242/// Produce the code to do an MRR autorelease-pool pop:
2243///   [tmp drain];
2244void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
2245  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
2246  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
2247  CallArgList Args;
2248  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
2249                              getContext().VoidTy, DrainSel, Arg, Args);
2250}
2251
2252void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
2253                                              llvm::Value *addr,
2254                                              QualType type) {
2255  CGF.EmitARCDestroyStrong(addr, /*precise*/ true);
2256}
2257
2258void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
2259                                                llvm::Value *addr,
2260                                                QualType type) {
2261  CGF.EmitARCDestroyStrong(addr, /*precise*/ false);
2262}
2263
2264void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
2265                                     llvm::Value *addr,
2266                                     QualType type) {
2267  CGF.EmitARCDestroyWeak(addr);
2268}
2269
2270namespace {
2271  struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
2272    llvm::Value *Token;
2273
2274    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2275
2276    void Emit(CodeGenFunction &CGF, Flags flags) {
2277      CGF.EmitObjCAutoreleasePoolPop(Token);
2278    }
2279  };
2280  struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
2281    llvm::Value *Token;
2282
2283    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
2284
2285    void Emit(CodeGenFunction &CGF, Flags flags) {
2286      CGF.EmitObjCMRRAutoreleasePoolPop(Token);
2287    }
2288  };
2289}
2290
2291void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
2292  if (CGM.getLangOpts().ObjCAutoRefCount)
2293    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
2294  else
2295    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
2296}
2297
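// Taken together with the push/pop helpers above, an @autoreleasepool { ... }
// statement is lowered roughly as
//
//   %token = call i8* @objc_autoreleasePoolPush()  ; or [[NSAutoreleasePool alloc] init] under MRR
//   ... body of the statement ...
//   call void @objc_autoreleasePoolPop(i8* %token) ; or [token drain], run from the pushed cleanup
//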
2298static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2299                                                  LValue lvalue,
2300                                                  QualType type) {
2301  switch (type.getObjCLifetime()) {
2302  case Qualifiers::OCL_None:
2303  case Qualifiers::OCL_ExplicitNone:
2304  case Qualifiers::OCL_Strong:
2305  case Qualifiers::OCL_Autoreleasing:
2306    return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
2307                         false);
2308
2309  case Qualifiers::OCL_Weak:
2310    return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
2311                         true);
2312  }
2313
2314  llvm_unreachable("impossible lifetime!");
2315}
2316
2317static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2318                                                  const Expr *e) {
2319  e = e->IgnoreParens();
2320  QualType type = e->getType();
2321
2322  // If we're loading retained from a __strong xvalue, we can avoid
2323  // an extra retain/release pair by zeroing out the source of this
2324  // "move" operation.
2325  if (e->isXValue() &&
2326      !type.isConstQualified() &&
2327      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
2328    // Emit the lvalue.
2329    LValue lv = CGF.EmitLValue(e);
2330
2331    // Load the object pointer.
2332    llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
2333
2334    // Set the source pointer to NULL.
2335    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
2336
2337    return TryEmitResult(result, true);
2338  }
2339
2340  // As a very special optimization, in ARC++, if the l-value is the
2341  // result of a non-volatile assignment, do a simple retain of the
2342  // result of the call to objc_storeWeak instead of reloading.
2343  if (CGF.getLangOpts().CPlusPlus &&
2344      !type.isVolatileQualified() &&
2345      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2346      isa<BinaryOperator>(e) &&
2347      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2348    return TryEmitResult(CGF.EmitScalarExpr(e), false);
2349
2350  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
2351}
2352
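// An assumed ARC++ example of the __strong xvalue path handled above:
//
//   __strong id tmp = ...;
//   __strong id other = std::move(tmp);
//
// The load takes the pointer out of 'tmp' and stores null back into it, so
// 'other' is initialized at +1 without an extra retain/release pair.
//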
2353static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
2354                                           llvm::Value *value);
2355
2356/// Given that the given expression is some sort of call (which does
2357/// not return retained), emit a retain following it.
2358static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
2359  llvm::Value *value = CGF.EmitScalarExpr(e);
2360  return emitARCRetainAfterCall(CGF, value);
2361}
2362
2363static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
2364                                           llvm::Value *value) {
2365  if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
2366    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2367
2368    // Place the retain immediately following the call.
2369    CGF.Builder.SetInsertPoint(call->getParent(),
2370                               ++llvm::BasicBlock::iterator(call));
2371    value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
2372
2373    CGF.Builder.restoreIP(ip);
2374    return value;
2375  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
2376    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
2377
2378    // Place the retain at the beginning of the normal destination block.
2379    llvm::BasicBlock *BB = invoke->getNormalDest();
2380    CGF.Builder.SetInsertPoint(BB, BB->begin());
2381    value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
2382
2383    CGF.Builder.restoreIP(ip);
2384    return value;
2385
2386  // Bitcasts can arise because of related-result returns.  Rewrite
2387  // the operand.
2388  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
2389    llvm::Value *operand = bitcast->getOperand(0);
2390    operand = emitARCRetainAfterCall(CGF, operand);
2391    bitcast->setOperand(0, operand);
2392    return bitcast;
2393
2394  // Generic fall-back case.
2395  } else {
2396    // Retain using the non-block variant: we never need to do a copy
2397    // of a block that's been returned to us.
2398    return CGF.EmitARCRetainNonBlock(value);
2399  }
2400}
2401
2402/// Determine whether it might be important to emit a separate
2403/// objc_retain_block on the result of the given expression, or
2404/// whether it's okay to just emit it in a +1 context.
2405static bool shouldEmitSeparateBlockRetain(const Expr *e) {
2406  assert(e->getType()->isBlockPointerType());
2407  e = e->IgnoreParens();
2408
2409  // For future goodness, emit block expressions directly in +1
2410  // contexts if we can.
2411  if (isa<BlockExpr>(e))
2412    return false;
2413
2414  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
2415    switch (cast->getCastKind()) {
2416    // Emitting these operations in +1 contexts is goodness.
2417    case CK_LValueToRValue:
2418    case CK_ARCReclaimReturnedObject:
2419    case CK_ARCConsumeObject:
2420    case CK_ARCProduceObject:
2421      return false;
2422
2423    // These operations preserve a block type.
2424    case CK_NoOp:
2425    case CK_BitCast:
2426      return shouldEmitSeparateBlockRetain(cast->getSubExpr());
2427
2428    // These operations are known to be bad (or haven't been considered).
2429    case CK_AnyPointerToBlockPointerCast:
2430    default:
2431      return true;
2432    }
2433  }
2434
2435  return true;
2436}
2437
2438/// Try to emit a PseudoObjectExpr at +1.
2439///
2440/// This massively duplicates emitPseudoObjectRValue.
2441static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
2442                                                  const PseudoObjectExpr *E) {
2443  llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
2444
2445  // Find the result expression.
2446  const Expr *resultExpr = E->getResultExpr();
2447  assert(resultExpr);
2448  TryEmitResult result;
2449
2450  for (PseudoObjectExpr::const_semantics_iterator
2451         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
2452    const Expr *semantic = *i;
2453
2454    // If this semantic expression is an opaque value, bind it
2455    // to the result of its source expression.
2456    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
2457      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
2458      OVMA opaqueData;
2459
2460      // If this semantic is the result of the pseudo-object
2461      // expression, try to evaluate the source as +1.
2462      if (ov == resultExpr) {
2463        assert(!OVMA::shouldBindAsLValue(ov));
2464        result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
2465        opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
2466
2467      // Otherwise, just bind it.
2468      } else {
2469        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
2470      }
2471      opaques.push_back(opaqueData);
2472
2473    // Otherwise, if the expression is the result, evaluate it
2474    // and remember the result.
2475    } else if (semantic == resultExpr) {
2476      result = tryEmitARCRetainScalarExpr(CGF, semantic);
2477
2478    // Otherwise, evaluate the expression in an ignored context.
2479    } else {
2480      CGF.EmitIgnoredExpr(semantic);
2481    }
2482  }
2483
2484  // Unbind all the opaques now.
2485  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
2486    opaques[i].unbind(CGF);
2487
2488  return result;
2489}
2490
2491static TryEmitResult
2492tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
2493  // Look through cleanups.
2494  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
2495    CGF.enterFullExpression(cleanups);
2496    CodeGenFunction::RunCleanupsScope scope(CGF);
2497    return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
2498  }
2499
2500  // The desired result type, if it differs from the type of the
2501  // ultimate opaque expression.
2502  llvm::Type *resultType = 0;
2503
2504  while (true) {
2505    e = e->IgnoreParens();
2506
2507    // There's a break at the end of this if-chain;  anything
2508    // that wants to keep looping has to explicitly continue.
2509    if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
2510      switch (ce->getCastKind()) {
2511      // No-op casts don't change the type, so we just ignore them.
2512      case CK_NoOp:
2513        e = ce->getSubExpr();
2514        continue;
2515
2516      case CK_LValueToRValue: {
2517        TryEmitResult loadResult
2518          = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
2519        if (resultType) {
2520          llvm::Value *value = loadResult.getPointer();
2521          value = CGF.Builder.CreateBitCast(value, resultType);
2522          loadResult.setPointer(value);
2523        }
2524        return loadResult;
2525      }
2526
2527      // These casts can change the type, so remember that and
2528      // soldier on.  We only need to remember the outermost such
2529      // cast, though.
2530      case CK_CPointerToObjCPointerCast:
2531      case CK_BlockPointerToObjCPointerCast:
2532      case CK_AnyPointerToBlockPointerCast:
2533      case CK_BitCast:
2534        if (!resultType)
2535          resultType = CGF.ConvertType(ce->getType());
2536        e = ce->getSubExpr();
2537        assert(e->getType()->hasPointerRepresentation());
2538        continue;
2539
2540      // For consumptions, just emit the subexpression and thus elide
2541      // the retain/release pair.
2542      case CK_ARCConsumeObject: {
2543        llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
2544        if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2545        return TryEmitResult(result, true);
2546      }
2547
2548      // Block extends are net +0.  Naively, we could just recurse on
2549      // the subexpression, but actually we need to ensure that the
2550      // value is copied as a block, so there's a little filter here.
2551      case CK_ARCExtendBlockObject: {
2552        llvm::Value *result; // will be a +0 value
2553
2554        // If we can't safely assume the sub-expression will produce a
2555        // block-copied value, emit the sub-expression at +0.
2556        if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
2557          result = CGF.EmitScalarExpr(ce->getSubExpr());
2558
2559        // Otherwise, try to emit the sub-expression at +1 recursively.
2560        } else {
2561          TryEmitResult subresult
2562            = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
2563          result = subresult.getPointer();
2564
2565          // If that produced a retained value, just use that,
2566          // possibly casting down.
2567          if (subresult.getInt()) {
2568            if (resultType)
2569              result = CGF.Builder.CreateBitCast(result, resultType);
2570            return TryEmitResult(result, true);
2571          }
2572
2573          // Otherwise it's +0.
2574        }
2575
2576        // Retain the object as a block, then cast down.
2577        result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
2578        if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2579        return TryEmitResult(result, true);
2580      }
2581
2582      // For reclaims, emit the subexpression as a retained call and
2583      // skip the consumption.
2584      case CK_ARCReclaimReturnedObject: {
2585        llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
2586        if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2587        return TryEmitResult(result, true);
2588      }
2589
2590      default:
2591        break;
2592      }
2593
2594    // Skip __extension__.
2595    } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
2596      if (op->getOpcode() == UO_Extension) {
2597        e = op->getSubExpr();
2598        continue;
2599      }
2600
2601    // For calls and message sends, use the retained-call logic.
2602    // Delegate inits are a special case in that they're the only
2603    // returns-retained expression that *isn't* surrounded by
2604    // a consume.
2605    } else if (isa<CallExpr>(e) ||
2606               (isa<ObjCMessageExpr>(e) &&
2607                !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
2608      llvm::Value *result = emitARCRetainCall(CGF, e);
2609      if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2610      return TryEmitResult(result, true);
2611
2612    // Look through pseudo-object expressions.
2613    } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
2614      TryEmitResult result
2615        = tryEmitARCRetainPseudoObject(CGF, pseudo);
2616      if (resultType) {
2617        llvm::Value *value = result.getPointer();
2618        value = CGF.Builder.CreateBitCast(value, resultType);
2619        result.setPointer(value);
2620      }
2621      return result;
2622    }
2623
2624    // Conservatively halt the search at any other expression kind.
2625    break;
2626  }
2627
2628  // We didn't find an obvious production, so emit what we've got and
2629  // tell the caller that we didn't manage to retain.
2630  llvm::Value *result = CGF.EmitScalarExpr(e);
2631  if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
2632  return TryEmitResult(result, false);
2633}
2634
2635static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2636                                                LValue lvalue,
2637                                                QualType type) {
2638  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
2639  llvm::Value *value = result.getPointer();
2640  if (!result.getInt())
2641    value = CGF.EmitARCRetain(type, value);
2642  return value;
2643}
2644
2645/// EmitARCRetainScalarExpr - Semantically equivalent to
2646/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
2647/// best-effort attempt to peephole expressions that naturally produce
2648/// retained objects.
2649llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
2650  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2651  llvm::Value *value = result.getPointer();
2652  if (!result.getInt())
2653    value = EmitARCRetain(e->getType(), value);
2654  return value;
2655}
2656
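// For example (illustrative), for a message send such as
//
//   x = [array objectAtIndex:0];
//
// tryEmitARCRetainScalarExpr emits the send followed immediately by
// objc_retainAutoreleasedReturnValue and reports the result as retained, so
// no separate objc_retain is added here; a plain variable load, by contrast,
// comes back at +0 and falls through to the EmitARCRetain call above.
//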
2657llvm::Value *
2658CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
2659  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
2660  llvm::Value *value = result.getPointer();
2661  if (result.getInt())
2662    value = EmitARCAutorelease(value);
2663  else
2664    value = EmitARCRetainAutorelease(e->getType(), value);
2665  return value;
2666}
2667
2668llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
2669  llvm::Value *result;
2670  bool doRetain;
2671
2672  if (shouldEmitSeparateBlockRetain(e)) {
2673    result = EmitScalarExpr(e);
2674    doRetain = true;
2675  } else {
2676    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
2677    result = subresult.getPointer();
2678    doRetain = !subresult.getInt();
2679  }
2680
2681  if (doRetain)
2682    result = EmitARCRetainBlock(result, /*mandatory*/ true);
2683  return EmitObjCConsumeObject(e->getType(), result);
2684}
2685
llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // tryEmitARCRetainScalarExpr does make an effort to do things
    // inside cleanups, but there are crazy cases like
    //   @throw A().foo;
    // where a full retain+autorelease is required and would
    // otherwise happen after the destructor for the temporary.
    if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
      enterFullExpression(ewc);
      expr = ewc->getSubExpr();
    }

    CodeGenFunction::RunCleanupsScope cleanups(*this);
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The exception
  // machinery doesn't do anything special with the exception (such as
  // retaining it), so there is no safety benefit to deferring cleanups
  // until after the throw has started, and doing so tends to produce
  // substantially worse code in the cases where it matters.
  return EmitScalarExpr(expr);
}

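/// EmitARCStoreStrong - Emit an assignment to an l-value of __strong
/// ownership under ARC (e.g. "x = y"): the new value is retained, stored,
/// and the old value released.  Returns the l-value and the stored value
/// so the caller can reuse the result of the assignment expression.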
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand the assignment into a
  // primitive load/store plus a release of the old value.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, /*precise*/ false);
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

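/// EmitARCStoreAutoreleasing - Emit an assignment to an l-value of
/// __autoreleasing ownership: the new value is retained and autoreleased,
/// then stored with a primitive store (the l-value itself takes no
/// ownership of it).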
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

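/// EmitObjCAutoreleasePoolStmt - Emit the body of an @autoreleasepool
/// statement: push a pool on entry and register a cleanup that pops it on
/// exit, using the runtime's native pool entry points when available and
/// an NSAutoreleasePool object otherwise.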
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                          const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end(); I != E; ++I)
    EmitStmt(*I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an empty inline asm that takes the object pointer as an
  // input; because the asm has side effects and is opaque to the optimizer,
  // the object is considered used (and therefore live) up to this point.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::Value *extender
    = llvm::InlineAsm::get(extenderType,
                           /* assembly */ "",
                           /* constraints */ "r",
                           /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  Builder.CreateCall(extender, object)->setDoesNotThrow();
}

static bool hasAtomicCopyHelperAPI(const ObjCRuntime &runtime) {
  // For now, only NeXT has these APIs.
  return runtime.isNeXTFamily();
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy-assignment operator, produce the following helper
/// function:
/// static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  // FIXME: This API is for the NeXT runtime only for now.
  if (!getLangOpts().CPlusPlus ||
      !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
    return 0;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return 0;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return 0;
  llvm::Constant *HelperFn = 0;
  if (hasTrivialSetExpr(PID))
    return 0;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

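  // Build a synthetic function declaration for the helper so that the usual
  // StartFunction/FinishFunction machinery can be used to emit its body.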
  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy, 0,
                                          SC_Static,
                                          SC_None,
                                          false,
                                          false);

  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
  args.push_back(&dstDecl);
  ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
  args.push_back(&srcDecl);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
                                              FunctionType::ExtInfo(),
                                              RequiredArgs::All);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  // Initialize debug info if needed.
  maybeInitializeDebugInfo();

  StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());

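  // Synthesize the expression "*dst = *src", reusing the callee of the
  // setter's C++ copy-assignment call, and emit it as the helper's body.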
  DeclRefExpr DstExpr(&dstDecl, false, DestTy,
                      VK_RValue, SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
                              Args, DestTy->getPointeeType(),
                              VK_LValue, SourceLocation(), false);

  EmitStmt(&TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

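/// GenerateObjCAtomicGetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy constructor, produce the following helper function,
/// which the getter of an atomic property of that type uses to copy the
/// value out:
/// static void copyHelper(Ty *dest, const Ty *source)
///   { /* copy-construct *dest from *source */ }
///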
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  // FIXME: This API is for the NeXT runtime only for now.
  if (!getLangOpts().CPlusPlus ||
      !hasAtomicCopyHelperAPI(getLangOpts().ObjCRuntime))
    return 0;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return 0;
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return 0;
  llvm::Constant *HelperFn = 0;

  if (hasTrivialGetExpr(PID))
    return 0;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy, 0,
                                          SC_Static,
                                          SC_None,
                                          false,
                                          false);

  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
  args.push_back(&dstDecl);
  ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
  args.push_back(&srcDecl);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
                                              FunctionType::ExtInfo(),
                                              RequiredArgs::All);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__copy_helper_atomic_property_", &CGM.getModule());

  // Initialize debug info if needed.
  maybeInitializeDebugInfo();

  StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());

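  // Synthesize a copy-construction of the destination from "*src": reuse the
  // constructor from the getter's CXXConstructExpr, replacing its first
  // argument with "*src" and forwarding the remaining (default) arguments.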
  DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  CXXConstructExpr *CXXConstExpr =
    cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(&SRC);
  CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
  ++A;

  for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
       A != AEnd; ++A)
    ConstructorArgs.push_back(*A);

  CXXConstructExpr *TheCXXConstructExpr =
    CXXConstructExpr::Create(C, Ty, SourceLocation(),
                             CXXConstExpr->getConstructor(),
                             CXXConstExpr->isElidable(),
                             ConstructorArgs,
                             CXXConstExpr->hadMultipleCandidates(),
                             CXXConstExpr->isListInitialization(),
                             CXXConstExpr->requiresZeroInitialization(),
                             CXXConstExpr->getConstructionKind(),
                             SourceRange());

  DeclRefExpr DstExpr(&dstDecl, false, DestTy,
                      VK_RValue, SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

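/// EmitBlockCopyAndAutorelease - Emit the moral equivalent of
/// [[block copy] autorelease] by sending the "copy" and "autorelease"
/// messages to the given block value.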
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), 0, 0);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), 0, 0);
  Val = Result.getScalarVal();
  return Val;
}


CGObjCRuntime::~CGObjCRuntime() {}