CGBlocks.cpp revision 198092
//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
#include <cstdio>

using namespace clang;
using namespace CodeGen;

/// BuildDescriptorBlockDecl - Build the constant block descriptor and emit it
/// as an internal global ("__block_descriptor_tmp").  The descriptor layout is
/// { reserved, size } plus, when the block needs copy/dispose helpers, the
/// copy-helper and destroy-helper function pointers.  All fields are emitted
/// as 'unsigned long' except the helper pointers produced by the helpers
/// themselves.
llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
                         const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit?  We should give
  // a user diagnostic in that case.  Better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size);
  Elts.push_back(C);

  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  C = llvm::ConstantStruct::get(VMContext, Elts, false);

  // The descriptor is immutable data; give it internal linkage so identical
  // descriptors in other translation units don't clash.
  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp");
  return C;
}

/// getNSConcreteGlobalBlock - Lazily create (once) and return the reference to
/// the runtime's _NSConcreteGlobalBlock class object, used as the isa of
/// global block literals.
llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

/// getNSConcreteStackBlock - Lazily create (once) and return the reference to
/// the runtime's _NSConcreteStackBlock class object, used as the isa of
/// stack-allocated block literals.
llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

/// CollectBlockDeclRefInfo - Recursively walk the block body statement and
/// record every BlockDeclRefExpr into Info, partitioned into by-reference
/// (__block) and by-copy captures.  References to functions are skipped since
/// they need no capture slot.
static void CollectBlockDeclRefInfo(const Stmt *S,
                                    CodeGenFunction::BlockInfo &Info) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info);

  if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) {
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(DE->getDecl()))
      return;

    if (DE->isByRef())
      Info.ByRefDeclRefs.push_back(DE);
    else
      Info.ByCopyDeclRefs.push_back(DE);
  }
}

/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.  This holds exactly
/// when the block captures nothing, by reference or by copy.
static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
  return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty();
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
/// BuildBlockLiteralTmp - Emit a stack-allocated block literal for BE.
/// The literal has the layout { isa, flags, reserved, invoke, descriptor,
/// captures... }.  First the invoke function is generated (which also
/// computes the capture list, size, alignment and whether copy/dispose
/// helpers are needed); then the header fields are stored, each capture is
/// initialized, and finally the descriptor is built and stored.  Blocks with
/// no captures are optimized into a constant global literal instead.
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {

  std::string Name = CurFn->getName();
  CodeGenFunction::BlockInfo Info(0, Name.c_str());
  CollectBlockDeclRefInfo(BE->getBody(), Info);

  // Check if the block can be global.
  // FIXME: This test doesn't work for nested blocks yet.  Longer term, I'd like
  // to just have one code path.  We should move this function into CGM and pass
  // CGF, then we can just check to see if CGF is 0.
  // NOTE(review): deliberately disabled with "0 &&"; the no-capture case below
  // handles the global-block optimization instead.
  if (0 && CanBlockBeGlobal(Info))
    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());

  std::vector<llvm::Constant*> Elts(5);
  llvm::Constant *C;
  llvm::Value *V;

  {
    // C = BuildBlockStructInitlist();
    unsigned int flags = BLOCK_HAS_DESCRIPTOR;

    // We run this first so that we set BlockHasCopyDispose from the entire
    // block literal.
    // __invoke
    uint64_t subBlockSize, subBlockAlign;
    llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
    bool subBlockHasCopyDispose = false;
    llvm::Function *Fn
      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
                                                   LocalDeclMap,
                                                   subBlockSize,
                                                   subBlockAlign,
                                                   subBlockDeclRefDecls,
                                                   subBlockHasCopyDispose);
    BlockHasCopyDispose |= subBlockHasCopyDispose;
    Elts[3] = Fn;

    // FIXME: Don't use BlockHasCopyDispose, it is set more often then
    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
    if (subBlockHasCopyDispose)
      flags |= BLOCK_HAS_COPY_DISPOSE;

    // __isa
    C = CGM.getNSConcreteStackBlock();
    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
    Elts[0] = C;

    // __flags
    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
    C = llvm::ConstantInt::get(IntTy, flags);
    Elts[1] = C;

    // __reserved
    C = llvm::ConstantInt::get(IntTy, 0);
    Elts[2] = C;

    // No captures: the whole literal is constant, so emit it as a global
    // block instead of building it on the stack.
    if (subBlockDeclRefDecls.size() == 0) {
      // __descriptor
      Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0);

      // Optimize to being a global block.
      Elts[0] = CGM.getNSConcreteGlobalBlock();
      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);

      C = llvm::ConstantStruct::get(VMContext, Elts, false);

      // NOTE(review): fixed 32-byte buffer; fine for any 32-bit count with
      // this format string, but fragile if the name ever changes — confirm.
      char Name[32];
      sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   C, Name);
      QualType BPT = BE->getType();
      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
      return C;
    }

    // Build the literal struct type: 5 header fields followed by one field
    // per capture.  The struct is packed; layout must agree with the offsets
    // handed out by getBlockOffset (including explicit padding entries).
    std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size());
    for (int i=0; i<4; ++i)
      Types[i] = Elts[i]->getType();
    Types[4] = PtrToInt8Ty;

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
      const Expr *E = subBlockDeclRefDecls[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      QualType Ty = E->getType();
      if (BDRE && BDRE->isByRef()) {
        // __block captures are stored as a pointer to the byref struct.
        Types[i+5] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
      } else
        Types[i+5] = ConvertType(Ty);
    }

    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);

    llvm::AllocaInst *A = CreateTempAlloca(Ty);
    A->setAlignment(subBlockAlign);
    V = A;

    // One HelperInfo per real capture (padding entries are skipped), consumed
    // later by the copy/destroy helper generators.
    std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
    int helpersize = 0;

    for (unsigned i=0; i<4; ++i)
      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));

    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
      {
        // FIXME: Push const down.
        Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
        DeclRefExpr *DR;
        ValueDecl *VD;

        DR = dyn_cast<DeclRefExpr>(E);
        // Skip padding.
        // (getBlockOffset inserts plain DeclRefExprs for alignment padding;
        // real captures are BlockDeclRefExprs, which are not DeclRefExprs
        // in this AST.)
        if (DR) continue;

        BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
        VD = BDRE->getDecl();

        llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp");
        NoteForHelper[helpersize].index = i+5;
        NoteForHelper[helpersize].RequiresCopying
          = BlockRequiresCopying(VD->getType());
        NoteForHelper[helpersize].flag
          = (VD->getType()->isBlockPointerType()
             ? BLOCK_FIELD_IS_BLOCK
             : BLOCK_FIELD_IS_OBJECT);

        if (LocalDeclMap[VD]) {
          // Variable is local to the enclosing function.
          if (BDRE->isByRef()) {
            NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
              // FIXME: Someone double check this.
              (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
            // Store the byref struct's forwarding pointer into the capture
            // slot directly; no expression emission needed.
            llvm::Value *Loc = LocalDeclMap[VD];
            Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
            Loc = Builder.CreateLoad(Loc, false);
            Builder.CreateStore(Loc, Addr);
            ++helpersize;
            continue;
          } else
            // Rewrite the capture as a plain DeclRefExpr so EmitAnyExpr
            // reads the local directly rather than through the block.
            E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD),
                                                VD->getType(), SourceLocation(),
                                                false, false);
        }
        if (BDRE->isByRef()) {
          NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
            // FIXME: Someone double check this.
            (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
          // Capture the address of the (outer) byref value field.
          E = new (getContext())
            UnaryOperator(E, UnaryOperator::AddrOf,
                          getContext().getPointerType(E->getType()),
                          SourceLocation());
        }
        ++helpersize;

        RValue r = EmitAnyExpr(E, Addr, false);
        if (r.isScalar()) {
          llvm::Value *Loc = r.getScalarVal();
          const llvm::Type *Ty = Types[i+5];
          if (BDRE->isByRef()) {
            // E is now the address of the value field, instead, we want the
            // address of the actual ByRef struct.  We optimize this slightly
            // compared to gcc by not grabbing the forwarding slot as this must
            // be done during Block_copy for us, and we can postpone the work
            // until then.
            uint64_t offset = BlockDecls[BDRE->getDecl()];

            llvm::Value *BlockLiteral = LoadBlockStruct();

            Loc = Builder.CreateGEP(BlockLiteral,
                       llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                                        offset),
                                    "block.literal");
            Ty = llvm::PointerType::get(Ty, 0);
            Loc = Builder.CreateBitCast(Loc, Ty);
            Loc = Builder.CreateLoad(Loc, false);
            // Loc = Builder.CreateBitCast(Loc, Ty);
          }
          Builder.CreateStore(Loc, Addr);
        } else if (r.isComplex())
          // FIXME: implement
          ErrorUnsupported(BE, "complex in block literal");
        else if (r.isAggregate())
          ; // Already created into the destination
        else
          assert (0 && "bad block variable");
        // FIXME: Ensure that the offset created by the backend for
        // the struct matches the previously computed offset in BlockDecls.
      }
    NoteForHelper.resize(helpersize);

    // __descriptor
    llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
                                                       subBlockSize, Ty,
                                                       &NoteForHelper);
    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
  }

  QualType BPT = BE->getType();
  return Builder.CreateBitCast(V, ConvertType(BPT));
}


/// getBlockDescriptorType - Lazily build and cache the LLVM type for
/// struct __block_descriptor { unsigned long reserved, block_size; }.
const llvm::Type *BlockModule::getBlockDescriptorType() {
  if (BlockDescriptorType)
    return BlockDescriptorType;

  const llvm::Type *UnsignedLongTy =
    getTypes().ConvertType(getContext().UnsignedLongTy);

  // struct __block_descriptor {
  //   unsigned long reserved;
  //   unsigned long block_size;
  // };
  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
                                              UnsignedLongTy,
                                              UnsignedLongTy,
                                              NULL);

  getModule().addTypeName("struct.__block_descriptor",
                          BlockDescriptorType);

  return BlockDescriptorType;
}

/// getGenericBlockLiteralType - Lazily build and cache the LLVM type for the
/// generic block literal header (isa, flags, reserved, invoke, descriptor).
const llvm::Type *BlockModule::getGenericBlockLiteralType() {
  if (GenericBlockLiteralType)
    return GenericBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  // };
  GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                  PtrToInt8Ty,
                                                  IntTy,
                                                  IntTy,
                                                  PtrToInt8Ty,
                                                  BlockDescPtrTy,
                                                  NULL);

  getModule().addTypeName("struct.__block_literal_generic",
                          GenericBlockLiteralType);

  return GenericBlockLiteralType;
}

/// getGenericExtendedBlockLiteralType - Like getGenericBlockLiteralType, but
/// with trailing copy-helper and destroy-helper pointers, used for blocks
/// that carry copy/dispose helpers.
const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
  if (GenericExtendedBlockLiteralType)
    return GenericExtendedBlockLiteralType;

  const llvm::Type *BlockDescPtrTy =
    llvm::PointerType::getUnqual(getBlockDescriptorType());

  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  // struct __block_literal_generic {
  //   void *__isa;
  //   int __flags;
  //   int __reserved;
  //   void (*__invoke)(void *);
  //   struct __block_descriptor *__descriptor;
  //   void *__copy_func_helper_decl;
  //   void *__destroy_func_decl;
  // };
  GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
                                                          PtrToInt8Ty,
                                                          IntTy,
                                                          IntTy,
                                                          PtrToInt8Ty,
                                                          BlockDescPtrTy,
                                                          PtrToInt8Ty,
                                                          PtrToInt8Ty,
                                                          NULL);

  getModule().addTypeName("struct.__block_literal_extended_generic",
                          GenericExtendedBlockLiteralType);

  return GenericExtendedBlockLiteralType;
}

/// BlockRequiresCopying - Forward to CGM's notion of whether capturing a
/// value of type Ty requires a copy helper.
bool BlockFunction::BlockRequiresCopying(QualType Ty) {
  return CGM.BlockRequiresCopying(Ty);
}

/// EmitBlockCallExpr - Emit a call through a block pointer.  The callee is
/// viewed as a generic block literal; the invoke pointer is loaded from slot
/// 3, the literal itself is passed as the implicit first (void*) argument,
/// and the function pointer is cast to the block's real signature before the
/// call.
RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
  const BlockPointerType *BPT =
    E->getCallee()->getType()->getAs<BlockPointerType>();

  llvm::Value *Callee = EmitScalarExpr(E->getCallee());

  // Get a pointer to the generic block literal.
  const llvm::Type *BlockLiteralTy =
    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());

  // Bitcast the callee to a block literal.
  llvm::Value *BlockLiteral =
    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");

  // Get the function pointer from the literal.
  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");

  BlockLiteral =
    Builder.CreateBitCast(BlockLiteral,
                          llvm::Type::getInt8PtrTy(VMContext),
                          "tmp");

  // Add the block literal.
  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
  CallArgList Args;
  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));

  QualType FnType = BPT->getPointeeType();

  // And the rest of the arguments.
  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
               E->arg_begin(), E->arg_end());

  // Load the function.
  llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp");

  QualType ResultType = FnType->getAs<FunctionType>()->getResultType();

  const CGFunctionInfo &FnInfo =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  // Cast the function pointer to the right type.
  const llvm::Type *BlockFTy =
    CGM.getTypes().GetFunctionType(FnInfo, false);

  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
  Func = Builder.CreateBitCast(Func, BlockFTyPtr);

  // And call the block.
  return EmitCall(FnInfo, Func, Args);
}

/// GetAddrOfBlockDecl - Return the address of the capture slot for E inside
/// the current block's literal (loaded via LoadBlockStruct).  Allocates an
/// offset for the variable on first use.  For __block variables the address
/// returned is the value field inside the byref struct, reached through its
/// forwarding pointer.
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
  const ValueDecl *VD = E->getDecl();

  uint64_t &offset = BlockDecls[VD];


  // See if we have already allocated an offset for this variable.
  // NOTE(review): offset 0 doubles as "not yet allocated"; this relies on no
  // real capture ever landing at offset 0 (the header precedes all captures).
  if (offset == 0) {
    // Don't run the expensive check, unless we have to.
    if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType()))
      BlockHasCopyDispose = true;
    // if not, allocate one now.
    offset = getBlockOffset(E);
  }

  llvm::Value *BlockLiteral = LoadBlockStruct();
  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
                       llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
                                                    offset),
                                     "block.literal");
  if (E->isByRef()) {
    const llvm::Type *PtrStructTy
      = llvm::PointerType::get(BuildByRefType(VD), 0);
    // The block literal will need a copy/destroy helper.
    BlockHasCopyDispose = true;

    const llvm::Type *Ty = PtrStructTy;
    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
    V = Builder.CreateLoad(V, false);
    // Chase the forwarding pointer in case the variable was moved to the
    // heap by Block_copy, then index to the actual value field.
    V = Builder.CreateStructGEP(V, 1, "forwarding");
    V = Builder.CreateLoad(V, false);
    V = Builder.CreateBitCast(V, PtrStructTy);
    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
                                VD->getNameAsString());
  } else {
    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());

    Ty = llvm::PointerType::get(Ty, 0);
    V = Builder.CreateBitCast(V, Ty);
  }
  return V;
}

/// BlockForwardSelf - Make the ObjC 'self' of the enclosing method visible
/// inside the block by mapping SelfDecl to its capture slot address (once).
void CodeGenFunction::BlockForwardSelf() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
  llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
  if (DMEntry)
    return;
  // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
  BlockDeclRefExpr *BDRE = new (getContext())
    BlockDeclRefExpr(SelfDecl,
                     SelfDecl->getType(), SourceLocation(), false);
  DMEntry = GetAddrOfBlockDecl(BDRE);
}

/// GetAddrOfGlobalBlock - Emit a capture-free block as a constant global
/// ("__block_literal_global") with a global descriptor; asserts that the
/// generated invoke function indeed imported nothing.
llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
  // Generate the block descriptor.
  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
    getTypes().ConvertType(getContext().IntTy));

  llvm::Constant *DescriptorFields[2];

  // Reserved
  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);

  // Block literal size. For global blocks we just use the size of the generic
  // block literal struct.
  uint64_t BlockLiteralSize =
    TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
  DescriptorFields[1] =
                  llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);

  llvm::Constant *DescriptorStruct =
    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);

  llvm::GlobalVariable *Descriptor =
    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             DescriptorStruct, "__block_descriptor_global");

  // Generate the constants for the block literal.
  llvm::Constant *LiteralFields[5];

  CodeGenFunction::BlockInfo Info(0, n);
  uint64_t subBlockSize, subBlockAlign;
  llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
  bool subBlockHasCopyDispose = false;
  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
  llvm::Function *Fn
    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
                                                 subBlockSize,
                                                 subBlockAlign,
                                                 subBlockDeclRefDecls,
                                                 subBlockHasCopyDispose);
  assert(subBlockSize == BlockLiteralSize
         && "no imports allowed for global block");

  // isa
  LiteralFields[0] = getNSConcreteGlobalBlock();

  // Flags
  LiteralFields[1] =
    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);

  // Reserved
  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);

  // Function
  LiteralFields[3] = Fn;

  // Descriptor
  LiteralFields[4] = Descriptor;

  llvm::Constant *BlockLiteralStruct =
    llvm::ConstantStruct::get(VMContext, &LiteralFields[0], 5, false);

  llvm::GlobalVariable *BlockLiteral =
    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
                             llvm::GlobalVariable::InternalLinkage,
                             BlockLiteralStruct, "__block_literal_global");

  return BlockLiteral;
}

/// LoadBlockStruct - Load the block literal pointer from the invoke
/// function's implicit first parameter slot.
llvm::Value *CodeGenFunction::LoadBlockStruct() {
  return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self");
}

/// GenerateBlockFunction - Generate the invoke function for BExpr.  Emits the
/// body with an implicit void* self parameter, then (after the body is known)
/// backpatches debug declares for the captures at a saved entry insert point.
/// Outputs via reference parameters: the literal's total Size and Align, the
/// ordered capture list, and whether copy/dispose helpers are needed.
llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
                                       const BlockInfo& Info,
                                       const Decl *OuterFuncDecl,
                                  llvm::DenseMap<const Decl*, llvm::Value*> ldm,
                                       uint64_t &Size,
                                       uint64_t &Align,
                       llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
                                       bool &subBlockHasCopyDispose) {

  // Check if we should generate debug info for this block.
  if (CGM.getDebugInfo())
    DebugInfo = CGM.getDebugInfo();

  // Arrange for local static and local extern declarations to appear
  // to be local to this function as well, as they are directly referenced
  // in a block.
  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
       i != ldm.end();
       ++i) {
    // NOTE(review): VD is dereferenced without a null check; this assumes
    // every ldm key is a VarDecl — confirm against callers.
    const VarDecl *VD = dyn_cast<VarDecl>(i->first);

    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
      LocalDeclMap[VD] = i->second;
  }

  // FIXME: We need to rearrange the code for copy/dispose so we have this
  // sooner, so we can calculate offsets correctly.
  // Captures start right after the (possibly extended) header.
  if (!BlockHasCopyDispose)
    BlockOffset = CGM.getTargetData()
      .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
  else
    BlockOffset = CGM.getTargetData()
      .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
  BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;

  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
  QualType ResultType;
  bool IsVariadic;
  if (const FunctionProtoType *FTy =
      dyn_cast<FunctionProtoType>(BlockFunctionType)) {
    ResultType = FTy->getResultType();
    IsVariadic = FTy->isVariadic();
  } else {
    // K&R style block.
    ResultType = BlockFunctionType->getResultType();
    IsVariadic = false;
  }

  FunctionArgList Args;

  const BlockDecl *BD = BExpr->getBlockDecl();

  // FIXME: This leaks
  ImplicitParamDecl *SelfDecl =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
  BlockStructDecl = SelfDecl;

  for (BlockDecl::param_const_iterator i = BD->param_begin(),
       e = BD->param_end(); i != e; ++i)
    Args.push_back(std::make_pair(*i, (*i)->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(ResultType, Args);

  std::string Name = std::string("__") + Info.Name + "_block_invoke_";
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(BD, Fn, FI);

  StartFunction(BD, ResultType, Fn, Args,
                BExpr->getBody()->getLocEnd());

  CurFuncDecl = OuterFuncDecl;
  CurCodeDecl = BD;

  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
  // (Decrement now / re-increment later so the point stays valid while the
  // body is emitted after it.)
  llvm::BasicBlock *entry = Builder.GetInsertBlock();
  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
  --entry_ptr;

  EmitStmt(BExpr->getBody());

  // Remember where we were...
  llvm::BasicBlock *resume = Builder.GetInsertBlock();

  // Go back to the entry.
  ++entry_ptr;
  Builder.SetInsertPoint(entry, entry_ptr);

  if (CGDebugInfo *DI = getDebugInfo()) {
    // Emit debug information for all the BlockDeclRefDecls.
    for (unsigned i=0; i < BlockDeclRefDecls.size(); ++i) {
      const Expr *E = BlockDeclRefDecls[i];
      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
      if (BDRE) {
        const ValueDecl *D = BDRE->getDecl();
        DI->setLocation(D->getLocation());
        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
                                             LocalDeclMap[getBlockStructDecl()],
                                              Builder, this);
      }
    }
  }
  // And resume where we left off.
  if (resume == 0)
    Builder.ClearInsertionPoint();
  else
    Builder.SetInsertPoint(resume);

  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());

  // The runtime needs a minimum alignment of a void *.
  uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
  BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);

  Size = BlockOffset;
  Align = BlockAlign;
  subBlockDeclRefDecls = BlockDeclRefDecls;
  subBlockHasCopyDispose |= BlockHasCopyDispose;
  return Fn;
}

/// getBlockOffset - Allocate an offset in the block literal for the variable
/// referenced by BDRE, inserting an explicit char-array padding entry into
/// BlockDeclRefDecls when alignment forces a gap.  Returns the allocated
/// offset; updates BlockOffset/BlockAlign as side effects.
uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
  const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());

  uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
  uint64_t Align = getContext().getDeclAlignInBytes(D);

  if (BDRE->isByRef()) {
    // __block variables are captured as a pointer to their byref struct.
    Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
    Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
  }

  assert ((Align > 0) && "alignment must be 1 byte or more");

  uint64_t OldOffset = BlockOffset;

  // Ensure proper alignment, even if it means we have to have a gap
  BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
  BlockAlign = std::max(Align, BlockAlign);

  uint64_t Pad = BlockOffset - OldOffset;
  if (Pad) {
    // NOTE(review): the result of this ArrayType::get is discarded; it looks
    // like dead code — confirm before removing.
    llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad);
    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
                                                       llvm::APInt(32, Pad),
                                                       ArrayType::Normal, 0);
    // Padding is represented as a plain DeclRefExpr to an anonymous char
    // array; consumers recognize padding by it not being a BlockDeclRefExpr.
    ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
                                         0, QualType(PadTy), 0, VarDecl::None);
    Expr *E;
    E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
                                       SourceLocation(), false, false);
    BlockDeclRefDecls.push_back(E);
  }
  BlockDeclRefDecls.push_back(BDRE);

  BlockOffset += Size;
  return BlockOffset-Size;
}

/// GenerateCopyHelperFunction - Emit "__copy_helper_block_": for every noted
/// capture that is byref or requires copying, call _Block_object_assign on
/// the (dst, src) slots with the recorded flag.
llvm::Constant *BlockFunction::
GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
                           std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  std::string Name = std::string("__copy_helper_block_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__copy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
  llvm::Type *PtrPtrT;

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
    // NOTE(review): this inner PtrPtrT shadows the outer declaration above
    // (harmless — both hold the same type — but worth cleaning up).
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
    DstObj = Builder.CreateLoad(DstObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);

        llvm::Value *N = llvm::ConstantInt::get(
              llvm::Type::getInt32Ty(T->getContext()), flag);
        llvm::Value *F = getBlockObjectAssign();
        Builder.CreateCall3(F, Dstv, Srcv, N);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GenerateDestroyHelperFunction - Emit "__destroy_helper_block_": for every
/// noted capture that is byref or requires copying, call the block-release
/// path (_Block_object_dispose) with the recorded flag.
llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  std::string Name = std::string("__destroy_helper_block_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildCopyHelper - Generate a copy helper on a fresh CodeGenFunction so it
/// doesn't disturb the current function's state.
llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
                                       std::vector<HelperInfo> *NoteForHelper) {
  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
                                                         T, NoteForHelper);
}

/// BuildDestroyHelper - Generate a destroy helper on a fresh CodeGenFunction
/// so it doesn't disturb the current function's state.
llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
                                      std::vector<HelperInfo> *NoteForHelperp) {
  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
                                                            T, NoteForHelperp);
}

/// GeneratebyrefCopyHelperFunction - Emit "__Block_byref_id_object_copy_":
/// copies the object field (slot 6, "x") of a __block byref struct from src
/// to dst via _Block_object_assign, with BLOCK_BYREF_CALLER or'd into flag.
llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  std::string Name = std::string("__Block_byref_id_object_copy_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
          llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// GeneratebyrefDestroyHelperFunction - Emit
/// "__Block_byref_id_object_dispose_": releases the object field (slot 6,
/// "x") of a __block byref struct, with BLOCK_BYREF_CALLER or'd into flag.
llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args);

  std::string Name = std::string("__Block_byref_id_object_dispose_");
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildbyrefCopyHelper - Return a (cached) byref copy helper for the given
/// type/flag/alignment; helpers are deduplicated by a key mixing alignment
/// and flags.
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int flag, unsigned Align) {
  // All alignments below that of pointer alignment collpase down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
  llvm::Constant *& Entry = CGM.AssignCache[kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
}

/// BuildbyrefDestroyHelper - Return a (cached) byref destroy helper for the
/// given type/flag/alignment; cached like BuildbyrefCopyHelper.
llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collpase down to just
  // pointer alignment, as we always have at least that much alignment to begin
  // with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
  llvm::Constant *& Entry = CGM.DestroyCache[kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
}

/// getBlockObjectDispose - Lazily declare and return the runtime function
/// void _Block_object_dispose(i8*, i32).
llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

/// getBlockObjectAssign - Lazily declare and return the runtime function
/// void _Block_object_assign(i8*, i8*, i32).
llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
1092 FTy = llvm::FunctionType::get(ResultType, ArgTys, false); 1093 CGM.BlockObjectAssign 1094 = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign"); 1095 } 1096 return CGM.BlockObjectAssign; 1097} 1098 1099void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) { 1100 llvm::Value *F = getBlockObjectDispose(); 1101 llvm::Value *N; 1102 V = Builder.CreateBitCast(V, PtrToInt8Ty); 1103 N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag); 1104 Builder.CreateCall2(F, V, N); 1105} 1106 1107ASTContext &BlockFunction::getContext() const { return CGM.getContext(); } 1108 1109BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, 1110 CGBuilderTy &B) 1111 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) { 1112 PtrToInt8Ty = llvm::PointerType::getUnqual( 1113 llvm::Type::getInt8Ty(VMContext)); 1114 1115 BlockHasCopyDispose = false; 1116} 1117