CGBlocks.cpp revision 193576
//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit blocks.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
using namespace clang;
using namespace CodeGen;

/// BuildDescriptorBlockDecl - Build the block descriptor constant for a block
/// literal and emit it as an internal-linkage global named
/// "__block_descriptor_tmp".  The descriptor layout emitted here is:
///   { unsigned long reserved;     // always 0
///     unsigned long Size;         // total byte size of the block literal
///     [copy helper, destroy helper] }  // only if BlockHasCopyDispose
/// \param BlockHasCopyDispose - whether captured fields need the copy/dispose
///   helper pair appended to the descriptor.
/// \param Size - the block literal's size in bytes (stored as unsigned long).
/// \param Ty - LLVM struct type of the block literal, forwarded to the helper
///   generators so they can GEP into captured fields.
/// \param NoteForHelper - per-capture bookkeeping consumed by the helpers.
llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
                         const llvm::StructType* Ty,
                         std::vector<HelperInfo> *NoteForHelper) {
  const llvm::Type *UnsignedLongTy
    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
  llvm::Constant *C;
  std::vector<llvm::Constant*> Elts;

  // reserved
  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
  Elts.push_back(C);

  // Size
  // FIXME: What is the right way to say this doesn't fit?  We should give
  // a user diagnostic in that case.  Better fix would be to change the
  // API to size_t.
  C = llvm::ConstantInt::get(UnsignedLongTy, Size);
  Elts.push_back(C);

  if (BlockHasCopyDispose) {
    // copy_func_helper_decl
    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));

    // destroy_func_decl
    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
  }

  C = llvm::ConstantStruct::get(Elts);

  C = new llvm::GlobalVariable(C->getType(), true,
                               llvm::GlobalValue::InternalLinkage,
                               C, "__block_descriptor_tmp", &CGM.getModule());
  return C;
}

/// getNSConcreteGlobalBlock - Lazily create (once) and return a reference to
/// the runtime's _NSConcreteGlobalBlock variable, used as the isa of global
/// (capture-free) block literals.
llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
  if (NSConcreteGlobalBlock == 0)
    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                      "_NSConcreteGlobalBlock");
  return NSConcreteGlobalBlock;
}

/// getNSConcreteStackBlock - Lazily create (once) and return a reference to
/// the runtime's _NSConcreteStackBlock variable, used as the isa of
/// stack-allocated block literals.
llvm::Constant *BlockModule::getNSConcreteStackBlock() {
  if (NSConcreteStackBlock == 0)
    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
                                                     "_NSConcreteStackBlock");
  return NSConcreteStackBlock;
}

/// CollectBlockDeclRefInfo - Recursively walk the statement S, recording every
/// BlockDeclRefExpr found into Info's ByRefDeclRefs or ByCopyDeclRefs list
/// according to its __block-ness.  References to functions are skipped since
/// they need no capture slot.
static void CollectBlockDeclRefInfo(const Stmt *S,
                                    CodeGenFunction::BlockInfo &Info) {
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (*I)
      CollectBlockDeclRefInfo(*I, Info);

  if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) {
    // FIXME: Handle enums.
    if (isa<FunctionDecl>(DE->getDecl()))
      return;

    if (DE->isByRef())
      Info.ByRefDeclRefs.push_back(DE);
    else
      Info.ByCopyDeclRefs.push_back(DE);
  }
}

/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.  That is the case
/// exactly when the block captures nothing.
static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
  return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty();
}

// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
100llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { 101 102 std::string Name = CurFn->getName(); 103 CodeGenFunction::BlockInfo Info(0, Name.c_str()); 104 CollectBlockDeclRefInfo(BE->getBody(), Info); 105 106 // Check if the block can be global. 107 // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like 108 // to just have one code path. We should move this function into CGM and pass 109 // CGF, then we can just check to see if CGF is 0. 110 if (0 && CanBlockBeGlobal(Info)) 111 return CGM.GetAddrOfGlobalBlock(BE, Name.c_str()); 112 113 std::vector<llvm::Constant*> Elts(5); 114 llvm::Constant *C; 115 llvm::Value *V; 116 117 { 118 // C = BuildBlockStructInitlist(); 119 unsigned int flags = BLOCK_HAS_DESCRIPTOR; 120 121 // We run this first so that we set BlockHasCopyDispose from the entire 122 // block literal. 123 // __invoke 124 uint64_t subBlockSize, subBlockAlign; 125 llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls; 126 bool subBlockHasCopyDispose = false; 127 llvm::Function *Fn 128 = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, LocalDeclMap, 129 subBlockSize, 130 subBlockAlign, 131 subBlockDeclRefDecls, 132 subBlockHasCopyDispose); 133 BlockHasCopyDispose |= subBlockHasCopyDispose; 134 Elts[3] = Fn; 135 136 // FIXME: Don't use BlockHasCopyDispose, it is set more often then 137 // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); } 138 if (subBlockHasCopyDispose) 139 flags |= BLOCK_HAS_COPY_DISPOSE; 140 141 // __isa 142 C = CGM.getNSConcreteStackBlock(); 143 C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty); 144 Elts[0] = C; 145 146 // __flags 147 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 148 CGM.getTypes().ConvertType(CGM.getContext().IntTy)); 149 C = llvm::ConstantInt::get(IntTy, flags); 150 Elts[1] = C; 151 152 // __reserved 153 C = llvm::ConstantInt::get(IntTy, 0); 154 Elts[2] = C; 155 156 if (subBlockDeclRefDecls.size() == 0) { 157 // __descriptor 158 
Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0); 159 160 // Optimize to being a global block. 161 Elts[0] = CGM.getNSConcreteGlobalBlock(); 162 Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL); 163 164 C = llvm::ConstantStruct::get(Elts); 165 166 char Name[32]; 167 sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount()); 168 C = new llvm::GlobalVariable(C->getType(), true, 169 llvm::GlobalValue::InternalLinkage, 170 C, Name, &CGM.getModule()); 171 QualType BPT = BE->getType(); 172 C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT)); 173 return C; 174 } 175 176 std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size()); 177 for (int i=0; i<4; ++i) 178 Types[i] = Elts[i]->getType(); 179 Types[4] = PtrToInt8Ty; 180 181 for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) { 182 const Expr *E = subBlockDeclRefDecls[i]; 183 const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); 184 QualType Ty = E->getType(); 185 if (BDRE && BDRE->isByRef()) { 186 uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl()); 187 Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0); 188 } else 189 Types[i+5] = ConvertType(Ty); 190 } 191 192 llvm::StructType *Ty = llvm::StructType::get(Types, true); 193 194 llvm::AllocaInst *A = CreateTempAlloca(Ty); 195 A->setAlignment(subBlockAlign); 196 V = A; 197 198 std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size()); 199 int helpersize = 0; 200 201 for (unsigned i=0; i<4; ++i) 202 Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp")); 203 204 for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) 205 { 206 // FIXME: Push const down. 207 Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]); 208 DeclRefExpr *DR; 209 ValueDecl *VD; 210 211 DR = dyn_cast<DeclRefExpr>(E); 212 // Skip padding. 
213 if (DR) continue; 214 215 BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); 216 VD = BDRE->getDecl(); 217 218 llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp"); 219 NoteForHelper[helpersize].index = i+5; 220 NoteForHelper[helpersize].RequiresCopying = BlockRequiresCopying(VD->getType()); 221 NoteForHelper[helpersize].flag 222 = VD->getType()->isBlockPointerType() ? BLOCK_FIELD_IS_BLOCK : BLOCK_FIELD_IS_OBJECT; 223 224 if (LocalDeclMap[VD]) { 225 if (BDRE->isByRef()) { 226 NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF | 227 // FIXME: Someone double check this. 228 (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0); 229 const llvm::Type *Ty = Types[i+5]; 230 llvm::Value *Loc = LocalDeclMap[VD]; 231 Loc = Builder.CreateStructGEP(Loc, 1, "forwarding"); 232 Loc = Builder.CreateLoad(Loc, false); 233 Loc = Builder.CreateBitCast(Loc, Ty); 234 Builder.CreateStore(Loc, Addr); 235 ++helpersize; 236 continue; 237 } else 238 E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD), 239 VD->getType(), SourceLocation(), 240 false, false); 241 } 242 if (BDRE->isByRef()) { 243 NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF | 244 // FIXME: Someone double check this. 245 (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0); 246 E = new (getContext()) 247 UnaryOperator(E, UnaryOperator::AddrOf, 248 getContext().getPointerType(E->getType()), 249 SourceLocation()); 250 } 251 ++helpersize; 252 253 RValue r = EmitAnyExpr(E, Addr, false); 254 if (r.isScalar()) { 255 llvm::Value *Loc = r.getScalarVal(); 256 const llvm::Type *Ty = Types[i+5]; 257 if (BDRE->isByRef()) { 258 // E is now the address of the value field, instead, we want the 259 // address of the actual ByRef struct. We optimize this slightly 260 // compared to gcc by not grabbing the forwarding slot as this must 261 // be done during Block_copy for us, and we can postpone the work 262 // until then. 
263 uint64_t offset = BlockDecls[BDRE->getDecl()]; 264 265 llvm::Value *BlockLiteral = LoadBlockStruct(); 266 267 Loc = Builder.CreateGEP(BlockLiteral, 268 llvm::ConstantInt::get(llvm::Type::Int64Ty, 269 offset), 270 "block.literal"); 271 Ty = llvm::PointerType::get(Ty, 0); 272 Loc = Builder.CreateBitCast(Loc, Ty); 273 Loc = Builder.CreateLoad(Loc, false); 274 // Loc = Builder.CreateBitCast(Loc, Ty); 275 } 276 Builder.CreateStore(Loc, Addr); 277 } else if (r.isComplex()) 278 // FIXME: implement 279 ErrorUnsupported(BE, "complex in block literal"); 280 else if (r.isAggregate()) 281 ; // Already created into the destination 282 else 283 assert (0 && "bad block variable"); 284 // FIXME: Ensure that the offset created by the backend for 285 // the struct matches the previously computed offset in BlockDecls. 286 } 287 NoteForHelper.resize(helpersize); 288 289 // __descriptor 290 llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose, 291 subBlockSize, Ty, 292 &NoteForHelper); 293 Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty); 294 Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp")); 295 } 296 297 QualType BPT = BE->getType(); 298 return Builder.CreateBitCast(V, ConvertType(BPT)); 299} 300 301 302const llvm::Type *BlockModule::getBlockDescriptorType() { 303 if (BlockDescriptorType) 304 return BlockDescriptorType; 305 306 const llvm::Type *UnsignedLongTy = 307 getTypes().ConvertType(getContext().UnsignedLongTy); 308 309 // struct __block_descriptor { 310 // unsigned long reserved; 311 // unsigned long block_size; 312 // }; 313 BlockDescriptorType = llvm::StructType::get(UnsignedLongTy, 314 UnsignedLongTy, 315 NULL); 316 317 getModule().addTypeName("struct.__block_descriptor", 318 BlockDescriptorType); 319 320 return BlockDescriptorType; 321} 322 323const llvm::Type *BlockModule::getGenericBlockLiteralType() { 324 if (GenericBlockLiteralType) 325 return GenericBlockLiteralType; 326 327 const llvm::Type *BlockDescPtrTy 
= 328 llvm::PointerType::getUnqual(getBlockDescriptorType()); 329 330 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 331 getTypes().ConvertType(getContext().IntTy)); 332 333 // struct __block_literal_generic { 334 // void *__isa; 335 // int __flags; 336 // int __reserved; 337 // void (*__invoke)(void *); 338 // struct __block_descriptor *__descriptor; 339 // }; 340 GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty, 341 IntTy, 342 IntTy, 343 PtrToInt8Ty, 344 BlockDescPtrTy, 345 NULL); 346 347 getModule().addTypeName("struct.__block_literal_generic", 348 GenericBlockLiteralType); 349 350 return GenericBlockLiteralType; 351} 352 353const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() { 354 if (GenericExtendedBlockLiteralType) 355 return GenericExtendedBlockLiteralType; 356 357 const llvm::Type *BlockDescPtrTy = 358 llvm::PointerType::getUnqual(getBlockDescriptorType()); 359 360 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 361 getTypes().ConvertType(getContext().IntTy)); 362 363 // struct __block_literal_generic { 364 // void *__isa; 365 // int __flags; 366 // int __reserved; 367 // void (*__invoke)(void *); 368 // struct __block_descriptor *__descriptor; 369 // void *__copy_func_helper_decl; 370 // void *__destroy_func_decl; 371 // }; 372 GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty, 373 IntTy, 374 IntTy, 375 PtrToInt8Ty, 376 BlockDescPtrTy, 377 PtrToInt8Ty, 378 PtrToInt8Ty, 379 NULL); 380 381 getModule().addTypeName("struct.__block_literal_extended_generic", 382 GenericExtendedBlockLiteralType); 383 384 return GenericExtendedBlockLiteralType; 385} 386 387RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) { 388 const BlockPointerType *BPT = 389 E->getCallee()->getType()->getAsBlockPointerType(); 390 391 llvm::Value *Callee = EmitScalarExpr(E->getCallee()); 392 393 // Get a pointer to the generic block literal. 
394 const llvm::Type *BlockLiteralTy = 395 llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType()); 396 397 // Bitcast the callee to a block literal. 398 llvm::Value *BlockLiteral = 399 Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal"); 400 401 // Get the function pointer from the literal. 402 llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp"); 403 404 BlockLiteral = 405 Builder.CreateBitCast(BlockLiteral, 406 llvm::PointerType::getUnqual(llvm::Type::Int8Ty), 407 "tmp"); 408 409 // Add the block literal. 410 QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy); 411 CallArgList Args; 412 Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy)); 413 414 QualType FnType = BPT->getPointeeType(); 415 416 // And the rest of the arguments. 417 EmitCallArgs(Args, FnType->getAsFunctionProtoType(), 418 E->arg_begin(), E->arg_end()); 419 420 // Load the function. 421 llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp"); 422 423 QualType ResultType = FnType->getAsFunctionType()->getResultType(); 424 425 const CGFunctionInfo &FnInfo = 426 CGM.getTypes().getFunctionInfo(ResultType, Args); 427 428 // Cast the function pointer to the right type. 429 const llvm::Type *BlockFTy = 430 CGM.getTypes().GetFunctionType(FnInfo, false); 431 432 const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy); 433 Func = Builder.CreateBitCast(Func, BlockFTyPtr); 434 435 // And call the block. 436 return EmitCall(FnInfo, Func, Args); 437} 438 439llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) { 440 uint64_t &offset = BlockDecls[E->getDecl()]; 441 442 const llvm::Type *Ty; 443 Ty = CGM.getTypes().ConvertType(E->getDecl()->getType()); 444 445 // See if we have already allocated an offset for this variable. 446 if (offset == 0) { 447 // Don't run the expensive check, unless we have to. 
448 if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType())) 449 BlockHasCopyDispose = true; 450 // if not, allocate one now. 451 offset = getBlockOffset(E); 452 } 453 454 llvm::Value *BlockLiteral = LoadBlockStruct(); 455 llvm::Value *V = Builder.CreateGEP(BlockLiteral, 456 llvm::ConstantInt::get(llvm::Type::Int64Ty, 457 offset), 458 "block.literal"); 459 if (E->isByRef()) { 460 bool needsCopyDispose = BlockRequiresCopying(E->getType()); 461 uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl()); 462 const llvm::Type *PtrStructTy 463 = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0); 464 // The block literal will need a copy/destroy helper. 465 BlockHasCopyDispose = true; 466 Ty = PtrStructTy; 467 Ty = llvm::PointerType::get(Ty, 0); 468 V = Builder.CreateBitCast(V, Ty); 469 V = Builder.CreateLoad(V, false); 470 V = Builder.CreateStructGEP(V, 1, "forwarding"); 471 V = Builder.CreateLoad(V, false); 472 V = Builder.CreateBitCast(V, PtrStructTy); 473 V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x"); 474 } else { 475 Ty = llvm::PointerType::get(Ty, 0); 476 V = Builder.CreateBitCast(V, Ty); 477 } 478 return V; 479} 480 481void CodeGenFunction::BlockForwardSelf() { 482 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); 483 ImplicitParamDecl *SelfDecl = OMD->getSelfDecl(); 484 llvm::Value *&DMEntry = LocalDeclMap[SelfDecl]; 485 if (DMEntry) 486 return; 487 // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care 488 BlockDeclRefExpr *BDRE = new (getContext()) 489 BlockDeclRefExpr(SelfDecl, 490 SelfDecl->getType(), SourceLocation(), false); 491 DMEntry = GetAddrOfBlockDecl(BDRE); 492} 493 494llvm::Constant * 495BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) { 496 // Generate the block descriptor. 
497 const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy); 498 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( 499 getTypes().ConvertType(getContext().IntTy)); 500 501 llvm::Constant *DescriptorFields[2]; 502 503 // Reserved 504 DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy); 505 506 // Block literal size. For global blocks we just use the size of the generic 507 // block literal struct. 508 uint64_t BlockLiteralSize = 509 TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8; 510 DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize); 511 512 llvm::Constant *DescriptorStruct = 513 llvm::ConstantStruct::get(&DescriptorFields[0], 2); 514 515 llvm::GlobalVariable *Descriptor = 516 new llvm::GlobalVariable(DescriptorStruct->getType(), true, 517 llvm::GlobalVariable::InternalLinkage, 518 DescriptorStruct, "__block_descriptor_global", 519 &getModule()); 520 521 // Generate the constants for the block literal. 
522 llvm::Constant *LiteralFields[5]; 523 524 CodeGenFunction::BlockInfo Info(0, n); 525 uint64_t subBlockSize, subBlockAlign; 526 llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls; 527 bool subBlockHasCopyDispose = false; 528 llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; 529 llvm::Function *Fn 530 = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap, 531 subBlockSize, 532 subBlockAlign, 533 subBlockDeclRefDecls, 534 subBlockHasCopyDispose); 535 assert(subBlockSize == BlockLiteralSize 536 && "no imports allowed for global block"); 537 538 // isa 539 LiteralFields[0] = getNSConcreteGlobalBlock(); 540 541 // Flags 542 LiteralFields[1] = 543 llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR); 544 545 // Reserved 546 LiteralFields[2] = llvm::Constant::getNullValue(IntTy); 547 548 // Function 549 LiteralFields[3] = Fn; 550 551 // Descriptor 552 LiteralFields[4] = Descriptor; 553 554 llvm::Constant *BlockLiteralStruct = 555 llvm::ConstantStruct::get(&LiteralFields[0], 5); 556 557 llvm::GlobalVariable *BlockLiteral = 558 new llvm::GlobalVariable(BlockLiteralStruct->getType(), true, 559 llvm::GlobalVariable::InternalLinkage, 560 BlockLiteralStruct, "__block_literal_global", 561 &getModule()); 562 563 return BlockLiteral; 564} 565 566llvm::Value *CodeGenFunction::LoadBlockStruct() { 567 return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self"); 568} 569 570llvm::Function * 571CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, 572 const BlockInfo& Info, 573 const Decl *OuterFuncDecl, 574 llvm::DenseMap<const Decl*, llvm::Value*> ldm, 575 uint64_t &Size, 576 uint64_t &Align, 577 llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls, 578 bool &subBlockHasCopyDispose) { 579 580 // Check if we should generate debug info for this block. 
581 if (CGM.getDebugInfo()) 582 DebugInfo = CGM.getDebugInfo(); 583 584 // Arrange for local static and local extern declarations to appear 585 // to be local to this function as well, as they are directly referenced 586 // in a block. 587 for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin(); 588 i != ldm.end(); 589 ++i) { 590 const VarDecl *VD = dyn_cast<VarDecl>(i->first); 591 592 if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage()) 593 LocalDeclMap[VD] = i->second; 594 } 595 596 // FIXME: We need to rearrange the code for copy/dispose so we have this 597 // sooner, so we can calculate offsets correctly. 598 if (!BlockHasCopyDispose) 599 BlockOffset = CGM.getTargetData() 600 .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8; 601 else 602 BlockOffset = CGM.getTargetData() 603 .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8; 604 BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; 605 606 const FunctionType *BlockFunctionType = BExpr->getFunctionType(); 607 QualType ResultType; 608 bool IsVariadic; 609 if (const FunctionProtoType *FTy = 610 dyn_cast<FunctionProtoType>(BlockFunctionType)) { 611 ResultType = FTy->getResultType(); 612 IsVariadic = FTy->isVariadic(); 613 } 614 else { 615 // K&R style block. 
616 ResultType = BlockFunctionType->getResultType(); 617 IsVariadic = false; 618 } 619 620 FunctionArgList Args; 621 622 const BlockDecl *BD = BExpr->getBlockDecl(); 623 624 // FIXME: This leaks 625 ImplicitParamDecl *SelfDecl = 626 ImplicitParamDecl::Create(getContext(), 0, 627 SourceLocation(), 0, 628 getContext().getPointerType(getContext().VoidTy)); 629 630 Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType())); 631 BlockStructDecl = SelfDecl; 632 633 for (BlockDecl::param_const_iterator i = BD->param_begin(), 634 e = BD->param_end(); i != e; ++i) 635 Args.push_back(std::make_pair(*i, (*i)->getType())); 636 637 const CGFunctionInfo &FI = 638 CGM.getTypes().getFunctionInfo(ResultType, Args); 639 640 std::string Name = std::string("__") + Info.Name + "_block_invoke_"; 641 CodeGenTypes &Types = CGM.getTypes(); 642 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic); 643 644 llvm::Function *Fn = 645 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 646 Name, 647 &CGM.getModule()); 648 649 CGM.SetInternalFunctionAttributes(BD, Fn, FI); 650 651 StartFunction(BD, ResultType, Fn, Args, 652 BExpr->getBody()->getLocEnd()); 653 CurFuncDecl = OuterFuncDecl; 654 CurCodeDecl = BD; 655 EmitStmt(BExpr->getBody()); 656 FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc()); 657 658 // The runtime needs a minimum alignment of a void *. 
659 uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; 660 BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign); 661 662 Size = BlockOffset; 663 Align = BlockAlign; 664 subBlockDeclRefDecls = BlockDeclRefDecls; 665 subBlockHasCopyDispose |= BlockHasCopyDispose; 666 return Fn; 667} 668 669uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) { 670 const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl()); 671 672 uint64_t Size = getContext().getTypeSize(D->getType()) / 8; 673 uint64_t Align = getContext().getDeclAlignInBytes(D); 674 675 if (BDRE->isByRef()) { 676 Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8; 677 Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; 678 } 679 680 assert ((Align > 0) && "alignment must be 1 byte or more"); 681 682 uint64_t OldOffset = BlockOffset; 683 684 // Ensure proper alignment, even if it means we have to have a gap 685 BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align); 686 BlockAlign = std::max(Align, BlockAlign); 687 688 uint64_t Pad = BlockOffset - OldOffset; 689 if (Pad) { 690 llvm::ArrayType::get(llvm::Type::Int8Ty, Pad); 691 QualType PadTy = getContext().getConstantArrayType(getContext().CharTy, 692 llvm::APInt(32, Pad), 693 ArrayType::Normal, 0); 694 ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(), 695 0, QualType(PadTy), VarDecl::None, 696 SourceLocation()); 697 Expr *E; 698 E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(), 699 SourceLocation(), false, false); 700 BlockDeclRefDecls.push_back(E); 701 } 702 BlockDeclRefDecls.push_back(BDRE); 703 704 BlockOffset += Size; 705 return BlockOffset-Size; 706} 707 708llvm::Constant *BlockFunction:: 709GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T, 710 std::vector<HelperInfo> *NoteForHelperp) { 711 QualType R = getContext().VoidTy; 712 713 FunctionArgList Args; 714 // FIXME: This leaks 715 ImplicitParamDecl *Dst = 716 
ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 717 getContext().getPointerType(getContext().VoidTy)); 718 Args.push_back(std::make_pair(Dst, Dst->getType())); 719 ImplicitParamDecl *Src = 720 ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 721 getContext().getPointerType(getContext().VoidTy)); 722 Args.push_back(std::make_pair(Src, Src->getType())); 723 724 const CGFunctionInfo &FI = 725 CGM.getTypes().getFunctionInfo(R, Args); 726 727 // FIXME: We'd like to put these into a mergable by content, with 728 // internal linkage. 729 std::string Name = std::string("__copy_helper_block_"); 730 CodeGenTypes &Types = CGM.getTypes(); 731 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); 732 733 llvm::Function *Fn = 734 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 735 Name, 736 &CGM.getModule()); 737 738 IdentifierInfo *II 739 = &CGM.getContext().Idents.get("__copy_helper_block_"); 740 741 FunctionDecl *FD = FunctionDecl::Create(getContext(), 742 getContext().getTranslationUnitDecl(), 743 SourceLocation(), II, R, 744 FunctionDecl::Static, false, 745 true); 746 CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); 747 748 llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src); 749 llvm::Type *PtrPtrT; 750 751 if (NoteForHelperp) { 752 std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp; 753 754 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); 755 SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT); 756 SrcObj = Builder.CreateLoad(SrcObj); 757 758 llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst); 759 llvm::Type *PtrPtrT; 760 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); 761 DstObj = Builder.CreateBitCast(DstObj, PtrPtrT); 762 DstObj = Builder.CreateLoad(DstObj); 763 764 for (unsigned i=0; i < NoteForHelper.size(); ++i) { 765 int flag = NoteForHelper[i].flag; 766 int index = NoteForHelper[i].index; 767 768 if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF) 769 || 
NoteForHelper[i].RequiresCopying) { 770 llvm::Value *Srcv = SrcObj; 771 Srcv = Builder.CreateStructGEP(Srcv, index); 772 Srcv = Builder.CreateBitCast(Srcv, 773 llvm::PointerType::get(PtrToInt8Ty, 0)); 774 Srcv = Builder.CreateLoad(Srcv); 775 776 llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index); 777 Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty); 778 779 llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); 780 llvm::Value *F = getBlockObjectAssign(); 781 Builder.CreateCall3(F, Dstv, Srcv, N); 782 } 783 } 784 } 785 786 CGF.FinishFunction(); 787 788 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); 789} 790 791llvm::Constant *BlockFunction:: 792GenerateDestroyHelperFunction(bool BlockHasCopyDispose, 793 const llvm::StructType* T, 794 std::vector<HelperInfo> *NoteForHelperp) { 795 QualType R = getContext().VoidTy; 796 797 FunctionArgList Args; 798 // FIXME: This leaks 799 ImplicitParamDecl *Src = 800 ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 801 getContext().getPointerType(getContext().VoidTy)); 802 803 Args.push_back(std::make_pair(Src, Src->getType())); 804 805 const CGFunctionInfo &FI = 806 CGM.getTypes().getFunctionInfo(R, Args); 807 808 // FIXME: We'd like to put these into a mergable by content, with 809 // internal linkage. 
810 std::string Name = std::string("__destroy_helper_block_"); 811 CodeGenTypes &Types = CGM.getTypes(); 812 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); 813 814 llvm::Function *Fn = 815 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 816 Name, 817 &CGM.getModule()); 818 819 IdentifierInfo *II 820 = &CGM.getContext().Idents.get("__destroy_helper_block_"); 821 822 FunctionDecl *FD = FunctionDecl::Create(getContext(), 823 getContext().getTranslationUnitDecl(), 824 SourceLocation(), II, R, 825 FunctionDecl::Static, false, 826 true); 827 CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); 828 829 if (NoteForHelperp) { 830 std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp; 831 832 llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src); 833 llvm::Type *PtrPtrT; 834 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); 835 SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT); 836 SrcObj = Builder.CreateLoad(SrcObj); 837 838 for (unsigned i=0; i < NoteForHelper.size(); ++i) { 839 int flag = NoteForHelper[i].flag; 840 int index = NoteForHelper[i].index; 841 842 if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF) 843 || NoteForHelper[i].RequiresCopying) { 844 llvm::Value *Srcv = SrcObj; 845 Srcv = Builder.CreateStructGEP(Srcv, index); 846 Srcv = Builder.CreateBitCast(Srcv, 847 llvm::PointerType::get(PtrToInt8Ty, 0)); 848 Srcv = Builder.CreateLoad(Srcv); 849 850 BuildBlockRelease(Srcv, flag); 851 } 852 } 853 } 854 855 CGF.FinishFunction(); 856 857 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); 858} 859 860llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T, 861 std::vector<HelperInfo> *NoteForHelper) { 862 return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose, 863 T, NoteForHelper); 864} 865 866llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T, 867 std::vector<HelperInfo> *NoteForHelperp) { 868 return 
CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose, 869 T, NoteForHelperp); 870} 871 872llvm::Constant *BlockFunction:: 873GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) { 874 QualType R = getContext().VoidTy; 875 876 FunctionArgList Args; 877 // FIXME: This leaks 878 ImplicitParamDecl *Dst = 879 ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 880 getContext().getPointerType(getContext().VoidTy)); 881 Args.push_back(std::make_pair(Dst, Dst->getType())); 882 883 // FIXME: This leaks 884 ImplicitParamDecl *Src = 885 ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 886 getContext().getPointerType(getContext().VoidTy)); 887 Args.push_back(std::make_pair(Src, Src->getType())); 888 889 const CGFunctionInfo &FI = 890 CGM.getTypes().getFunctionInfo(R, Args); 891 892 std::string Name = std::string("__Block_byref_id_object_copy_"); 893 CodeGenTypes &Types = CGM.getTypes(); 894 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); 895 896 // FIXME: We'd like to put these into a mergable by content, with 897 // internal linkage. 
898 llvm::Function *Fn = 899 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 900 Name, 901 &CGM.getModule()); 902 903 IdentifierInfo *II 904 = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_"); 905 906 FunctionDecl *FD = FunctionDecl::Create(getContext(), 907 getContext().getTranslationUnitDecl(), 908 SourceLocation(), II, R, 909 FunctionDecl::Static, false, 910 true); 911 CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); 912 913 // dst->x 914 llvm::Value *V = CGF.GetAddrOfLocalVar(Dst); 915 V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0)); 916 V = Builder.CreateLoad(V); 917 V = Builder.CreateStructGEP(V, 6, "x"); 918 llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty); 919 920 // src->x 921 V = CGF.GetAddrOfLocalVar(Src); 922 V = Builder.CreateLoad(V); 923 V = Builder.CreateBitCast(V, T); 924 V = Builder.CreateStructGEP(V, 6, "x"); 925 V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0)); 926 llvm::Value *SrcObj = Builder.CreateLoad(V); 927 928 flag |= BLOCK_BYREF_CALLER; 929 930 llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); 931 llvm::Value *F = getBlockObjectAssign(); 932 Builder.CreateCall3(F, DstObj, SrcObj, N); 933 934 CGF.FinishFunction(); 935 936 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); 937} 938 939llvm::Constant * 940BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T, 941 int flag) { 942 QualType R = getContext().VoidTy; 943 944 FunctionArgList Args; 945 // FIXME: This leaks 946 ImplicitParamDecl *Src = 947 ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, 948 getContext().getPointerType(getContext().VoidTy)); 949 950 Args.push_back(std::make_pair(Src, Src->getType())); 951 952 const CGFunctionInfo &FI = 953 CGM.getTypes().getFunctionInfo(R, Args); 954 955 std::string Name = std::string("__Block_byref_id_object_dispose_"); 956 CodeGenTypes &Types = CGM.getTypes(); 957 const llvm::FunctionType *LTy = 
    Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           Name,
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  // Fabricate a FunctionDecl for StartFunction, as in the copy helper.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // Load src->x (field 6 of the byref struct -- presumably the captured
  // object slot; must match the byref layout emitted elsewhere, TODO
  // confirm) so it can be released.
  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  // Mark the call as coming from a byref helper, then emit
  // _Block_object_dispose(src->x, flag).
  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}

/// BuildbyrefCopyHelper - Return (building on demand) the byref copy helper
/// for the given alignment and flags.  Results are memoized in
/// CGM.AssignCache.
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
                                                    int flag, unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to
  // begin with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  // NOTE(review): the cache key ignores T entirely -- this assumes the
  // emitted helper depends only on Align and flag, not on the concrete
  // byref type; confirm against the helper generator.
  uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
  llvm::Constant *& Entry = CGM.AssignCache[kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
}

/// BuildbyrefDestroyHelper - Return (building on demand) the byref destroy
/// helper for the given alignment and flags; memoized in CGM.DestroyCache
/// with the same Align/flag keying as the copy-helper cache.
llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
                                                       int flag,
                                                       unsigned Align) {
  // All alignments below that of pointer alignment collapse down to just
  // pointer alignment, as we always have at least that much alignment to
  // begin with.
  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
  // As an optimization, we only generate a single function of each kind we
  // might need.  We need a different one for each alignment and for each
  // setting of flags.  We mix Align and flag to get the kind.
  uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
  llvm::Constant *& Entry = CGM.DestroyCache[kind];
  if (Entry)
    return Entry;
  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
}

/// getBlockObjectDispose - Lazily create (and cache in CGM) the declaration
/// of the runtime function:  void _Block_object_dispose(i8*, i32).
llvm::Value *BlockFunction::getBlockObjectDispose() {
  if (CGM.BlockObjectDispose == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::VoidTy;
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::Int32Ty);
    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
    CGM.BlockObjectDispose
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
  }
  return CGM.BlockObjectDispose;
}

/// getBlockObjectAssign - Lazily create (and cache in CGM) the declaration
/// of the runtime function:  void _Block_object_assign(i8*, i8*, i32).
llvm::Value *BlockFunction::getBlockObjectAssign() {
  if (CGM.BlockObjectAssign == 0) {
    const llvm::FunctionType *FTy;
    std::vector<const llvm::Type*> ArgTys;
    const llvm::Type *ResultType = llvm::Type::VoidTy;
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(PtrToInt8Ty);
    ArgTys.push_back(llvm::Type::Int32Ty);
    FTy = llvm::FunctionType::get(ResultType, ArgTys,
                                  false);
    CGM.BlockObjectAssign
      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
  }
  return CGM.BlockObjectAssign;
}

/// BuildBlockRelease - Emit a call to _Block_object_dispose(V, flag) at the
/// current insertion point, first casting V to i8*.
void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
  llvm::Value *F = getBlockObjectDispose();
  llvm::Value *N;
  V = Builder.CreateBitCast(V, PtrToInt8Ty);
  N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
  Builder.CreateCall2(F, V, N);
}

/// getContext - Convenience accessor for the ASTContext owned by CGM.
ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }

/// BlockFunction constructor - capture the module, enclosing function, and
/// IR builder; cache the i8* type used throughout block emission, and start
/// with no copy/dispose requirement recorded.
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                             CGBuilderTy &B)
  : CGM(cgm), CGF(cgf), Builder(B) {
  PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);

  BlockHasCopyDispose = false;
}