//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be
      // in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;

  case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
  case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
  case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
                    "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
                  "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
  case Stmt::AttributedStmtClass:
                            EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

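  // For a statement expression, a non-aggregate result is spilled into this
  // temporary below; aggregate results are evaluated directly into AggSlot.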
  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

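  // If the caller is done with this block and nothing ever branched to it,
  // there is no reason to emit it; just drop it.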
  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
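  // Place the block after the block containing its first use, if any;
  // otherwise append it to the end of the function.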
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
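  // If we already made a destination for this label, reuse it.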
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
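  // Note whether this statement carries the nomerge attribute so that calls
  // emitted within the substatement are not merged.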
  bool nomerge = false;
  for (const auto *A : S.getAttrs())
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
      break;
    }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest;
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader typically is
  // just a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

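  // Remember the count on entry; the loop back-edge weight below is the body
  // count minus this value.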
  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the condition in the do.cond block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the DoCond block typically
  // is just a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

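  // Both range bounds are integer constant expressions, so they fold to
  // known constants here.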
  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

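  // The case value is an integer constant expression, so it folds to a known
  // constant.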
  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                              SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so,
    // the statements we skip over must be safe to elide) or we might already
    // have it.
1453    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1454    bool StartedInLiveCode = FoundCase;
1455    unsigned StartSize = ResultStmts.size();
1456
1457    // If we've not found the case yet, scan through looking for it.
1458    if (Case) {
1459      // Keep track of whether we see a skipped declaration.  The code could be
1460      // using the declaration even if it is skipped, so we can't optimize out
1461      // the decl if the kept statements might refer to it.
1462      bool HadSkippedDecl = false;
1463
1464      // If we're looking for the case, just see if we can skip each of the
1465      // substatements.
1466      for (; Case && I != E; ++I) {
1467        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1468
1469        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1470        case CSFC_Failure: return CSFC_Failure;
1471        case CSFC_Success:
          // A successful result means either 1) the statement doesn't contain
          // the case and is skippable, or 2) it does contain the case value
          // and also contains the break to exit the switch.  In the latter
          // case, we just verify that the rest of the statements are elidable.
1476          if (FoundCase) {
1477            // If we found the case and skipped declarations, we can't do the
1478            // optimization.
1479            if (HadSkippedDecl)
1480              return CSFC_Failure;
1481
1482            for (++I; I != E; ++I)
1483              if (CodeGenFunction::ContainsLabel(*I, true))
1484                return CSFC_Failure;
1485            return CSFC_Success;
1486          }
1487          break;
1488        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
1492          assert(FoundCase && "Didn't find case but returned fallthrough?");
1493          // We recursively found Case, so we're not looking for it anymore.
1494          Case = nullptr;
1495
1496          // If we found the case and skipped declarations, we can't do the
1497          // optimization.
1498          if (HadSkippedDecl)
1499            return CSFC_Failure;
1500          break;
1501        }
1502      }
1503
1504      if (!FoundCase)
1505        return CSFC_Success;
1506
1507      assert(!HadSkippedDecl && "fallthrough after skipping decl");
1508    }
1509
1510    // If we have statements in our range, then we know that the statements are
1511    // live and need to be added to the set of statements we're tracking.
1512    bool AnyDecls = false;
1513    for (; I != E; ++I) {
1514      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1515
1516      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1517      case CSFC_Failure: return CSFC_Failure;
1518      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and was
        // included in ResultStmts; keep adding the statements that follow it.
1521        break;
1522      case CSFC_Success:
1523        // A successful result means that we found the break statement and
1524        // stopped statement inclusion.  We just ensure that any leftover stmts
1525        // are skippable and return success ourselves.
1526        for (++I; I != E; ++I)
1527          if (CodeGenFunction::ContainsLabel(*I, true))
1528            return CSFC_Failure;
1529        return CSFC_Success;
1530      }
1531    }
1532
1533    // If we're about to fall out of a scope without hitting a 'break;', we
1534    // can't perform the optimization if there were any decls in that scope
1535    // (we'd lose their end-of-lifetime).
1536    if (AnyDecls) {
1537      // If the entire compound statement was live, there's one more thing we
1538      // can try before giving up: emit the whole thing as a single statement.
1539      // We can do that unless the statement contains a 'break;'.
1540      // FIXME: Such a break must be at the end of a construct within this one.
1541      // We could emit this by just ignoring the BreakStmts entirely.
1542      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1543        ResultStmts.resize(StartSize);
1544        ResultStmts.push_back(S);
1545      } else {
1546        return CSFC_Failure;
1547      }
1548    }
1549
1550    return CSFC_FallThrough;
1551  }
1552
1553  // Okay, this is some other statement that we don't handle explicitly, like a
1554  // for statement or increment etc.  If we are skipping over this statement,
1555  // just verify it doesn't have labels, which would make it invalid to elide.
1556  if (Case) {
1557    if (CodeGenFunction::ContainsLabel(S, true))
1558      return CSFC_Failure;
1559    return CSFC_Success;
1560  }
1561
1562  // Otherwise, we want to include this statement.  Everything is cool with that
1563  // so long as it doesn't contain a break out of the switch we're in.
1564  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1565
1566  // Otherwise, everything is great.  Include the statement and tell the caller
1567  // that we fall through and include the next statement as well.
1568  ResultStmts.push_back(S);
1569  return CSFC_FallThrough;
1570}
1571
1572/// FindCaseStatementsForValue - Find the case statement being jumped to and
1573/// then invoke CollectStatementsForCase to find the list of statements to emit
1574/// for a switch on constant.  See the comment above CollectStatementsForCase
1575/// for more details.
1576static bool FindCaseStatementsForValue(const SwitchStmt &S,
1577                                       const llvm::APSInt &ConstantCondValue,
1578                                SmallVectorImpl<const Stmt*> &ResultStmts,
1579                                       ASTContext &C,
1580                                       const SwitchCase *&ResultCase) {
1581  // First step, find the switch case that is being branched to.  We can do this
1582  // efficiently by scanning the SwitchCase list.
1583  const SwitchCase *Case = S.getSwitchCaseList();
1584  const DefaultStmt *DefaultCase = nullptr;
1585
1586  for (; Case; Case = Case->getNextSwitchCase()) {
1587    // It's either a default or case.  Just remember the default statement in
1588    // case we're not jumping to any numbered cases.
1589    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1590      DefaultCase = DS;
1591      continue;
1592    }
1593
1594    // Check to see if this case is the one we're looking for.
1595    const CaseStmt *CS = cast<CaseStmt>(Case);
1596    // Don't handle case ranges yet.
1597    if (CS->getRHS()) return false;
1598
1599    // If we found our case, remember it as 'case'.
1600    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1601      break;
1602  }
1603
1604  // If we didn't find a matching case, we use a default if it exists, or we
1605  // elide the whole switch body!
1606  if (!Case) {
1607    // It is safe to elide the body of the switch if it doesn't contain labels
1608    // etc.  If it is safe, return successfully with an empty ResultStmts list.
1609    if (!DefaultCase)
1610      return !CodeGenFunction::ContainsLabel(&S);
1611    Case = DefaultCase;
1612  }
1613
  // Ok, we know which case is being jumped to.  Try to collect all the
  // statements that follow it.  This can fail for a variety of reasons.  Also
  // check that the recursive walk actually found our case statement; insane
  // cases like this can fail to find it in the recursive walk since we don't
  // handle every stmt kind:
1619  // switch (4) {
1620  //   while (1) {
1621  //     case 4: ...
1622  bool FoundCase = false;
1623  ResultCase = Case;
1624  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1625                                  ResultStmts) != CSFC_Failure &&
1626         FoundCase;
1627}
1628
1629void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1630  // Handle nested switch statements.
1631  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1632  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1633  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1634
1635  // See if we can constant fold the condition of the switch and therefore only
1636  // emit the live case statement (if any) of the switch.
1637  llvm::APSInt ConstantCondValue;
1638  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1639    SmallVector<const Stmt*, 4> CaseStmts;
1640    const SwitchCase *Case = nullptr;
1641    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1642                                   getContext(), Case)) {
1643      if (Case)
1644        incrementProfileCounter(Case);
1645      RunCleanupsScope ExecutedScope(*this);
1646
1647      if (S.getInit())
1648        EmitStmt(S.getInit());
1649
      // If needed, emit the condition variable inside the cleanup scope that
      // this constant-folded-switch special case uses.
1652      if (S.getConditionVariable())
1653        EmitDecl(*S.getConditionVariable());
1654
      // At this point, we are no longer "within" a switch instance, so we
      // temporarily clear SwitchInsn to ensure that any embedded case
      // statements are not emitted as cases of an enclosing switch.
1658      SwitchInsn = nullptr;
1659
1660      // Okay, we can dead code eliminate everything except this case.  Emit the
1661      // specified series of statements and we're good.
1662      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1663        EmitStmt(CaseStmts[i]);
1664      incrementProfileCounter(&S);
1665
      // Now restore the saved switch instance so that nested switches
      // continue to function properly.
1668      SwitchInsn = SavedSwitchInsn;
1669
1670      return;
1671    }
1672  }
1673
1674  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1675
1676  RunCleanupsScope ConditionScope(*this);
1677
1678  if (S.getInit())
1679    EmitStmt(S.getInit());
1680
1681  if (S.getConditionVariable())
1682    EmitDecl(*S.getConditionVariable());
1683  llvm::Value *CondV = EmitScalarExpr(S.getCond());
1684
  // Create a basic block to hold the code that comes after the switch
  // statement.  We also need to create the default block now so that explicit
  // case range tests have a place to jump to on failure.
1689  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1690  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1691  if (PGO.haveRegionCounts()) {
1692    // Walk the SwitchCase list to find how many there are.
1693    uint64_t DefaultCount = 0;
1694    unsigned NumCases = 0;
1695    for (const SwitchCase *Case = S.getSwitchCaseList();
1696         Case;
1697         Case = Case->getNextSwitchCase()) {
1698      if (isa<DefaultStmt>(Case))
1699        DefaultCount = getProfileCount(Case);
1700      NumCases += 1;
1701    }
1702    SwitchWeights = new SmallVector<uint64_t, 16>();
1703    SwitchWeights->reserve(NumCases);
1704    // The default needs to be first. We store the edge count, so we already
1705    // know the right weight.
1706    SwitchWeights->push_back(DefaultCount);
1707  }
1708  CaseRangeBlock = DefaultBlock;
1709
1710  // Clear the insertion point to indicate we are in unreachable code.
1711  Builder.ClearInsertionPoint();
1712
  // All break statements jump to SwitchExit.  If BreakContinueStack is
  // non-empty, reuse the enclosing loop's ContinueBlock for 'continue'.
1715  JumpDest OuterContinue;
1716  if (!BreakContinueStack.empty())
1717    OuterContinue = BreakContinueStack.back().ContinueBlock;
1718
1719  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1720
1721  // Emit switch body.
1722  EmitStmt(S.getBody());
1723
1724  BreakContinueStack.pop_back();
1725
1726  // Update the default block in case explicit case range tests have
1727  // been chained on top.
1728  SwitchInsn->setDefaultDest(CaseRangeBlock);
1729
1730  // If a default was never emitted:
1731  if (!DefaultBlock->getParent()) {
1732    // If we have cleanups, emit the default block so that there's a
1733    // place to jump through the cleanups from.
1734    if (ConditionScope.requiresCleanups()) {
1735      EmitBlock(DefaultBlock);
1736
1737    // Otherwise, just forward the default block to the switch end.
1738    } else {
1739      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1740      delete DefaultBlock;
1741    }
1742  }
1743
1744  ConditionScope.ForceCleanup();
1745
1746  // Emit continuation.
1747  EmitBlock(SwitchExit.getBlock(), true);
1748  incrementProfileCounter(&S);
1749
1750  // If the switch has a condition wrapped by __builtin_unpredictable,
1751  // create metadata that specifies that the switch is unpredictable.
1752  // Don't bother if not optimizing because that metadata would not be used.
1753  auto *Call = dyn_cast<CallExpr>(S.getCond());
1754  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1755    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1756    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1757      llvm::MDBuilder MDHelper(getLLVMContext());
1758      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1759                              MDHelper.createUnpredictable());
1760    }
1761  }
1762
1763  if (SwitchWeights) {
1764    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1765           "switch weights do not match switch cases");
1766    // If there's only one jump destination there's no sense weighting it.
1767    if (SwitchWeights->size() > 1)
1768      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1769                              createProfileWeights(*SwitchWeights));
1770    delete SwitchWeights;
1771  }
1772  SwitchInsn = SavedSwitchInsn;
1773  SwitchWeights = SavedSwitchWeights;
1774  CaseRangeBlock = SavedCRBlock;
1775}
1776
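/// Convert a GCC-style asm constraint for a single operand into the form the
/// LLVM backend expects.  For example, modifiers such as '*', '?', '!', '='
/// and '+' are dropped, 'g' is expanded to "imr", alternatives separated by
/// ',' are joined with '|', and a symbolic operand name such as "[sym]" is
/// resolved to its operand index.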
1777static std::string
1778SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1779                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1780  std::string Result;
1781
1782  while (*Constraint) {
1783    switch (*Constraint) {
1784    default:
1785      Result += Target.convertConstraint(Constraint);
1786      break;
1787    // Ignore these
1788    case '*':
1789    case '?':
1790    case '!':
    case '=': // Will see this and the following in multi-alternative
              // constraints.
1792    case '+':
1793      break;
1794    case '#': // Ignore the rest of the constraint alternative.
1795      while (Constraint[1] && Constraint[1] != ',')
1796        Constraint++;
1797      break;
1798    case '&':
1799    case '%':
1800      Result += *Constraint;
1801      while (Constraint[1] && Constraint[1] == *Constraint)
1802        Constraint++;
1803      break;
1804    case ',':
1805      Result += "|";
1806      break;
1807    case 'g':
1808      Result += "imr";
1809      break;
1810    case '[': {
1811      assert(OutCons &&
1812             "Must pass output names to constraints with a symbolic name");
1813      unsigned Index;
1814      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1815      assert(result && "Could not resolve symbolic name"); (void)result;
1816      Result += llvm::utostr(Index);
1817      break;
1818    }
1819    }
1820
1821    Constraint++;
1822  }
1823
1824  return Result;
1825}
1826
/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
/// as using a particular register, add that register as a constraint that will
/// be used in this asm stmt.
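///
/// For example, for a hypothetical declaration
///   register int Var asm("rax");
/// an "r" output constraint on Var is rewritten as "{rax}", pinning the
/// operand to the register named in the asm label.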
1830static std::string
1831AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1832                       const TargetInfo &Target, CodeGenModule &CGM,
1833                       const AsmStmt &Stmt, const bool EarlyClobber) {
1834  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1835  if (!AsmDeclRef)
1836    return Constraint;
1837  const ValueDecl &Value = *AsmDeclRef->getDecl();
1838  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1839  if (!Variable)
1840    return Constraint;
1841  if (Variable->getStorageClass() != SC_Register)
1842    return Constraint;
1843  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1844  if (!Attr)
1845    return Constraint;
1846  StringRef Register = Attr->getLabel();
1847  assert(Target.isValidGCCRegisterName(Register));
1848  // We're using validateOutputConstraint here because we only care if
1849  // this is a register constraint.
1850  TargetInfo::ConstraintInfo Info(Constraint, "");
1851  if (Target.validateOutputConstraint(Info) &&
1852      !Info.allowsRegister()) {
1853    CGM.ErrorUnsupported(&Stmt, "__asm__");
1854    return Constraint;
1855  }
1856  // Canonicalize the register here before returning it.
1857  Register = Target.getNormalizedGCCRegisterName(Register);
1858  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1859}
1860
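/// Emit the value for an inline asm input given as an lvalue (also used for
/// the input half of a read-write output).  Operands that may be in a register
/// are loaded: scalars directly, and small aggregates through a pointer cast
/// to an integer of the same width.  Anything else is passed by address, with
/// '*' added to the constraint string to mark the operand as indirect.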
1861llvm::Value*
1862CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1863                                    LValue InputValue, QualType InputType,
1864                                    std::string &ConstraintStr,
1865                                    SourceLocation Loc) {
1866  llvm::Value *Arg;
1867  if (Info.allowsRegister() || !Info.allowsMemory()) {
1868    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1869      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1870    } else {
1871      llvm::Type *Ty = ConvertType(InputType);
1872      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1873      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1874        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1875        Ty = llvm::PointerType::getUnqual(Ty);
1876
1877        Arg = Builder.CreateLoad(
1878            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
1879      } else {
1880        Arg = InputValue.getPointer(*this);
1881        ConstraintStr += '*';
1882      }
1883    }
1884  } else {
1885    Arg = InputValue.getPointer(*this);
1886    ConstraintStr += '*';
1887  }
1888
1889  return Arg;
1890}
1891
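/// Emit the value for an inline asm input expression.  Constraints that
/// require a constant are evaluated to an immediate when possible; scalar
/// register operands are emitted directly; everything else is emitted as an
/// lvalue via EmitAsmInputLValue.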
1892llvm::Value* CodeGenFunction::EmitAsmInput(
1893                                         const TargetInfo::ConstraintInfo &Info,
1894                                           const Expr *InputExpr,
1895                                           std::string &ConstraintStr) {
1896  // If this can't be a register or memory, i.e., has to be a constant
1897  // (immediate or symbolic), try to emit it as such.
1898  if (!Info.allowsRegister() && !Info.allowsMemory()) {
1899    if (Info.requiresImmediateConstant()) {
1900      Expr::EvalResult EVResult;
1901      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
1902
1903      llvm::APSInt IntResult;
1904      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
1905                                          getContext()))
1906        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
1907    }
1908
1909    Expr::EvalResult Result;
1910    if (InputExpr->EvaluateAsInt(Result, getContext()))
1911      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
1912  }
1913
1914  if (Info.allowsRegister() || !Info.allowsMemory())
1915    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1916      return EmitScalarExpr(InputExpr);
1917  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1918    return EmitScalarExpr(InputExpr);
1919  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1920  LValue Dest = EmitLValue(InputExpr);
1921  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1922                            InputExpr->getExprLoc());
1923}
1924
1925/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1926/// asm call instruction.  The !srcloc MDNode contains a list of constant
1927/// integers which are the source locations of the start of each line in the
1928/// asm.
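///
/// For example, for asm("movl %eax, %ebx\n\tcpuid") the node holds two
/// entries: the location of the start of the string literal and the location
/// of the byte following the '\n', so backend diagnostics can point at the
/// line that caused them.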
1929static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1930                                      CodeGenFunction &CGF) {
1931  SmallVector<llvm::Metadata *, 8> Locs;
1932  // Add the location of the first line to the MDNode.
1933  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1934      CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
1935  StringRef StrVal = Str->getString();
1936  if (!StrVal.empty()) {
1937    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1938    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1939    unsigned StartToken = 0;
1940    unsigned ByteOffset = 0;
1941
1942    // Add the location of the start of each subsequent line of the asm to the
1943    // MDNode.
1944    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1945      if (StrVal[i] != '\n') continue;
1946      SourceLocation LineLoc = Str->getLocationOfByte(
1947          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1948      Locs.push_back(llvm::ConstantAsMetadata::get(
1949          llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1950    }
1951  }
1952
1953  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1954}
1955
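/// Add the attributes (nounwind, readonly/readnone, convergent) and !srcloc
/// metadata common to every inline asm call or callbr, and unpack the asm's
/// register results into RegResults: a single result is used directly, while
/// multiple results are extracted from the returned struct.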
1956static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
1957                              bool ReadOnly, bool ReadNone, const AsmStmt &S,
1958                              const std::vector<llvm::Type *> &ResultRegTypes,
1959                              CodeGenFunction &CGF,
1960                              std::vector<llvm::Value *> &RegResults) {
1961  Result.addAttribute(llvm::AttributeList::FunctionIndex,
1962                      llvm::Attribute::NoUnwind);
1963  // Attach readnone and readonly attributes.
1964  if (!HasSideEffect) {
1965    if (ReadNone)
1966      Result.addAttribute(llvm::AttributeList::FunctionIndex,
1967                          llvm::Attribute::ReadNone);
1968    else if (ReadOnly)
1969      Result.addAttribute(llvm::AttributeList::FunctionIndex,
1970                          llvm::Attribute::ReadOnly);
1971  }
1972
1973  // Slap the source location of the inline asm into a !srcloc metadata on the
1974  // call.
1975  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
1976    Result.setMetadata("srcloc",
1977                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
1978  else {
1979    // At least put the line number on MS inline asm blobs.
1980    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
1981                                        S.getAsmLoc().getRawEncoding());
1982    Result.setMetadata("srcloc",
1983                       llvm::MDNode::get(CGF.getLLVMContext(),
1984                                         llvm::ConstantAsMetadata::get(Loc)));
1985  }
1986
1987  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
1988    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
1989    // convergent (meaning, they may call an intrinsically convergent op, such
1990    // as bar.sync, and so can't have certain optimizations applied around
1991    // them).
1992    Result.addAttribute(llvm::AttributeList::FunctionIndex,
1993                        llvm::Attribute::Convergent);
1994  // Extract all of the register value results from the asm.
1995  if (ResultRegTypes.size() == 1) {
1996    RegResults.push_back(&Result);
1997  } else {
1998    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
1999      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2000      RegResults.push_back(Tmp);
2001    }
2002  }
2003}
2004
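// Emitting an asm statement proceeds in several phases: validate and simplify
// the output and input constraints, emit the output lvalues and input values
// (including the input halves of read-write operands), append label, in/out
// and clobber constraints, build the inline asm call (or callbr for asm goto),
// and finally store the returned register values back into their destinations.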
2005void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2006  // Assemble the final asm string.
2007  std::string AsmString = S.generateAsmString(getContext());
2008
2009  // Get all the output and input constraints together.
2010  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2011  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2012
2013  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2014    StringRef Name;
2015    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2016      Name = GAS->getOutputName(i);
2017    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2018    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2019    assert(IsValid && "Failed to parse output constraint");
2020    OutputConstraintInfos.push_back(Info);
2021  }
2022
2023  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2024    StringRef Name;
2025    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2026      Name = GAS->getInputName(i);
2027    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2028    bool IsValid =
2029      getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2030    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2031    InputConstraintInfos.push_back(Info);
2032  }
2033
2034  std::string Constraints;
2035
2036  std::vector<LValue> ResultRegDests;
2037  std::vector<QualType> ResultRegQualTys;
2038  std::vector<llvm::Type *> ResultRegTypes;
2039  std::vector<llvm::Type *> ResultTruncRegTypes;
2040  std::vector<llvm::Type *> ArgTypes;
2041  std::vector<llvm::Value*> Args;
2042  llvm::BitVector ResultTypeRequiresCast;
2043
2044  // Keep track of inout constraints.
2045  std::string InOutConstraints;
2046  std::vector<llvm::Value*> InOutArgs;
2047  std::vector<llvm::Type*> InOutArgTypes;
2048
  // Keep track of output constraints for use by tied input operands.
2050  std::vector<std::string> OutputConstraints;
2051
  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any side effects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if, in addition to meeting the conditions listed
  // above, it doesn't have any input memory constraints.
2058  bool ReadOnly = true, ReadNone = true;
2059
2060  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2061    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2062
2063    // Simplify the output constraint.
2064    std::string OutputConstraint(S.getOutputConstraint(i));
2065    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2066                                          getTarget(), &OutputConstraintInfos);
2067
2068    const Expr *OutExpr = S.getOutputExpr(i);
2069    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2070
2071    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2072                                              getTarget(), CGM, S,
2073                                              Info.earlyClobber());
2074    OutputConstraints.push_back(OutputConstraint);
2075    LValue Dest = EmitLValue(OutExpr);
2076    if (!Constraints.empty())
2077      Constraints += ',';
2078
2079    // If this is a register output, then make the inline asm return it
2080    // by-value.  If this is a memory result, return the value by-reference.
2081    bool isScalarizableAggregate =
2082        hasAggregateEvaluationKind(OutExpr->getType());
2083    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2084                                 isScalarizableAggregate)) {
2085      Constraints += "=" + OutputConstraint;
2086      ResultRegQualTys.push_back(OutExpr->getType());
2087      ResultRegDests.push_back(Dest);
2088      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2089      if (Info.allowsRegister() && isScalarizableAggregate) {
2090        ResultTypeRequiresCast.push_back(true);
2091        unsigned Size = getContext().getTypeSize(OutExpr->getType());
2092        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2093        ResultRegTypes.push_back(ConvTy);
2094      } else {
2095        ResultTypeRequiresCast.push_back(false);
2096        ResultRegTypes.push_back(ResultTruncRegTypes.back());
2097      }
2098      // If this output is tied to an input, and if the input is larger, then
2099      // we need to set the actual result type of the inline asm node to be the
2100      // same as the input type.
2101      if (Info.hasMatchingInput()) {
2102        unsigned InputNo;
2103        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2104          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2105          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2106            break;
2107        }
2108        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2109
2110        QualType InputTy = S.getInputExpr(InputNo)->getType();
2111        QualType OutputType = OutExpr->getType();
2112
2113        uint64_t InputSize = getContext().getTypeSize(InputTy);
2114        if (getContext().getTypeSize(OutputType) < InputSize) {
2115          // Form the asm to return the value as a larger integer or fp type.
2116          ResultRegTypes.back() = ConvertType(InputTy);
2117        }
2118      }
2119      if (llvm::Type* AdjTy =
2120            getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2121                                                 ResultRegTypes.back()))
2122        ResultRegTypes.back() = AdjTy;
2123      else {
2124        CGM.getDiags().Report(S.getAsmLoc(),
2125                              diag::err_asm_invalid_type_in_input)
2126            << OutExpr->getType() << OutputConstraint;
2127      }
2128
2129      // Update largest vector width for any vector types.
2130      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2131        LargestVectorWidth =
2132            std::max((uint64_t)LargestVectorWidth,
2133                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2134    } else {
2135      ArgTypes.push_back(Dest.getAddress(*this).getType());
2136      Args.push_back(Dest.getPointer(*this));
2137      Constraints += "=*";
2138      Constraints += OutputConstraint;
2139      ReadOnly = ReadNone = false;
2140    }
2141
2142    if (Info.isReadWrite()) {
2143      InOutConstraints += ',';
2144
2145      const Expr *InputExpr = S.getOutputExpr(i);
2146      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2147                                            InOutConstraints,
2148                                            InputExpr->getExprLoc());
2149
2150      if (llvm::Type* AdjTy =
2151          getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2152                                               Arg->getType()))
2153        Arg = Builder.CreateBitCast(Arg, AdjTy);
2154
2155      // Update largest vector width for any vector types.
2156      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2157        LargestVectorWidth =
2158            std::max((uint64_t)LargestVectorWidth,
2159                     VT->getPrimitiveSizeInBits().getKnownMinSize());
2160      if (Info.allowsRegister())
2161        InOutConstraints += llvm::utostr(i);
2162      else
2163        InOutConstraints += OutputConstraint;
2164
2165      InOutArgTypes.push_back(Arg->getType());
2166      InOutArgs.push_back(Arg);
2167    }
2168  }
2169
2170  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2171  // to the return value slot. Only do this when returning in registers.
2172  if (isa<MSAsmStmt>(&S)) {
2173    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2174    if (RetAI.isDirect() || RetAI.isExtend()) {
2175      // Make a fake lvalue for the return value slot.
2176      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2177      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2178          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2179          ResultRegDests, AsmString, S.getNumOutputs());
2180      SawAsmBlock = true;
2181    }
2182  }
2183
2184  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2185    const Expr *InputExpr = S.getInputExpr(i);
2186
2187    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2188
2189    if (Info.allowsMemory())
2190      ReadNone = false;
2191
2192    if (!Constraints.empty())
2193      Constraints += ',';
2194
2195    // Simplify the input constraint.
2196    std::string InputConstraint(S.getInputConstraint(i));
2197    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2198                                         &OutputConstraintInfos);
2199
2200    InputConstraint = AddVariableConstraints(
2201        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2202        getTarget(), CGM, S, false /* No EarlyClobber */);
2203
    std::string ReplaceConstraint(InputConstraint);
2205    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2206
2207    // If this input argument is tied to a larger output result, extend the
2208    // input to be the same size as the output.  The LLVM backend wants to see
2209    // the input and output of a matching constraint be the same size.  Note
2210    // that GCC does not define what the top bits are here.  We use zext because
2211    // that is usually cheaper, but LLVM IR should really get an anyext someday.
2212    if (Info.hasTiedOperand()) {
2213      unsigned Output = Info.getTiedOperand();
2214      QualType OutputType = S.getOutputExpr(Output)->getType();
2215      QualType InputTy = InputExpr->getType();
2216
2217      if (getContext().getTypeSize(OutputType) >
2218          getContext().getTypeSize(InputTy)) {
2219        // Use ptrtoint as appropriate so that we can do our extension.
2220        if (isa<llvm::PointerType>(Arg->getType()))
2221          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2222        llvm::Type *OutputTy = ConvertType(OutputType);
2223        if (isa<llvm::IntegerType>(OutputTy))
2224          Arg = Builder.CreateZExt(Arg, OutputTy);
2225        else if (isa<llvm::PointerType>(OutputTy))
2226          Arg = Builder.CreateZExt(Arg, IntPtrTy);
2227        else {
2228          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2229          Arg = Builder.CreateFPExt(Arg, OutputTy);
2230        }
2231      }
2232      // Deal with the tied operands' constraint code in adjustInlineAsmType.
2233      ReplaceConstraint = OutputConstraints[Output];
2234    }
2235    if (llvm::Type* AdjTy =
2236          getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2237                                                   Arg->getType()))
2238      Arg = Builder.CreateBitCast(Arg, AdjTy);
2239    else
2240      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2241          << InputExpr->getType() << InputConstraint;
2242
2243    // Update largest vector width for any vector types.
2244    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2245      LargestVectorWidth =
2246          std::max((uint64_t)LargestVectorWidth,
2247                   VT->getPrimitiveSizeInBits().getKnownMinSize());
2248
2249    ArgTypes.push_back(Arg->getType());
2250    Args.push_back(Arg);
2251    Constraints += InputConstraint;
2252  }
2253
2254  // Labels
2255  SmallVector<llvm::BasicBlock *, 16> Transfer;
2256  llvm::BasicBlock *Fallthrough = nullptr;
2257  bool IsGCCAsmGoto = false;
2258  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
2259    IsGCCAsmGoto = GS->isAsmGoto();
2260    if (IsGCCAsmGoto) {
2261      for (const auto *E : GS->labels()) {
2262        JumpDest Dest = getJumpDestForLabel(E->getLabel());
2263        Transfer.push_back(Dest.getBlock());
2264        llvm::BlockAddress *BA =
2265            llvm::BlockAddress::get(CurFn, Dest.getBlock());
2266        Args.push_back(BA);
2267        ArgTypes.push_back(BA->getType());
2268        if (!Constraints.empty())
2269          Constraints += ',';
2270        Constraints += 'X';
2271      }
2272      Fallthrough = createBasicBlock("asm.fallthrough");
2273    }
2274  }
2275
2276  // Append the "input" part of inout constraints last.
2277  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2278    ArgTypes.push_back(InOutArgTypes[i]);
2279    Args.push_back(InOutArgs[i]);
2280  }
2281  Constraints += InOutConstraints;
2282
2283  // Clobbers
2284  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2285    StringRef Clobber = S.getClobber(i);
2286
2287    if (Clobber == "memory")
2288      ReadOnly = ReadNone = false;
2289    else if (Clobber != "cc") {
2290      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2291      if (CGM.getCodeGenOpts().StackClashProtector &&
2292          getTarget().isSPRegName(Clobber)) {
2293        CGM.getDiags().Report(S.getAsmLoc(),
2294                              diag::warn_stack_clash_protection_inline_asm);
2295      }
2296    }
2297
2298    if (!Constraints.empty())
2299      Constraints += ',';
2300
2301    Constraints += "~{";
2302    Constraints += Clobber;
2303    Constraints += '}';
2304  }
2305
2306  // Add machine specific clobbers
2307  std::string MachineClobbers = getTarget().getClobbers();
2308  if (!MachineClobbers.empty()) {
2309    if (!Constraints.empty())
2310      Constraints += ',';
2311    Constraints += MachineClobbers;
2312  }
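  // At this point the constraint string is complete.  For example, for
  // asm("..." : "=r"(x) : "r"(y)) on x86 it would be
  // "=r,r,~{dirflag},~{fpsr},~{flags}".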
2313
2314  llvm::Type *ResultType;
2315  if (ResultRegTypes.empty())
2316    ResultType = VoidTy;
2317  else if (ResultRegTypes.size() == 1)
2318    ResultType = ResultRegTypes[0];
2319  else
2320    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2321
2322  llvm::FunctionType *FTy =
2323    llvm::FunctionType::get(ResultType, ArgTypes, false);
2324
2325  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2326  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2327    llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2328  llvm::InlineAsm *IA =
2329    llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2330                         /* IsAlignStack */ false, AsmDialect);
2331  std::vector<llvm::Value*> RegResults;
2332  if (IsGCCAsmGoto) {
2333    llvm::CallBrInst *Result =
2334        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2335    EmitBlock(Fallthrough);
2336    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2337                      ReadNone, S, ResultRegTypes, *this, RegResults);
2338  } else {
2339    llvm::CallInst *Result =
2340        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2341    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
2342                      ReadNone, S, ResultRegTypes, *this, RegResults);
2343  }
2344
2345  assert(RegResults.size() == ResultRegTypes.size());
2346  assert(RegResults.size() == ResultTruncRegTypes.size());
2347  assert(RegResults.size() == ResultRegDests.size());
2348  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2349  // in which case its size may grow.
2350  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2351  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2352    llvm::Value *Tmp = RegResults[i];
2353
2354    // If the result type of the LLVM IR asm doesn't match the result type of
2355    // the expression, do the conversion.
2356    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2357      llvm::Type *TruncTy = ResultTruncRegTypes[i];
2358
      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
2361      if (TruncTy->isFloatingPointTy())
2362        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2363      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2364        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2365        Tmp = Builder.CreateTrunc(Tmp,
2366                   llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2367        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2368      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize =
            CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2370        Tmp = Builder.CreatePtrToInt(Tmp,
2371                   llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2372        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2373      } else if (TruncTy->isIntegerTy()) {
2374        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2375      } else if (TruncTy->isVectorTy()) {
2376        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2377      }
2378    }
2379
2380    LValue Dest = ResultRegDests[i];
2381    // ResultTypeRequiresCast elements correspond to the first
2382    // ResultTypeRequiresCast.size() elements of RegResults.
2383    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2384      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2385      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2386                                        ResultRegTypes[i]->getPointerTo());
2387      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2388      if (Ty.isNull()) {
2389        const Expr *OutExpr = S.getOutputExpr(i);
2390        CGM.Error(
2391            OutExpr->getExprLoc(),
2392            "impossible constraint in asm: can't store value into a register");
2393        return;
2394      }
2395      Dest = MakeAddrLValue(A, Ty);
2396    }
2397    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2398  }
2399}
2400
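/// Create the temporary record that holds the captures of a CapturedStmt and
/// initialize each field from the corresponding capture initializer (or from
/// the computed size of a captured variable-length array type).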
2401LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2402  const RecordDecl *RD = S.getCapturedRecordDecl();
2403  QualType RecordTy = getContext().getRecordType(RD);
2404
2405  // Initialize the captured struct.
2406  LValue SlotLV =
2407    MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2408
2409  RecordDecl::field_iterator CurField = RD->field_begin();
2410  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2411                                                 E = S.capture_init_end();
2412       I != E; ++I, ++CurField) {
2413    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2414    if (CurField->hasCapturedVLAType()) {
2415      auto VAT = CurField->getCapturedVLAType();
2416      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2417    } else {
2418      EmitInitializerForField(*CurField, LV, *I);
2419    }
2420  }
2421
2422  return SlotLV;
2423}
2424
2425/// Generate an outlined function for the body of a CapturedStmt, store any
2426/// captured variables into the captured struct, and call the outlined function.
2427llvm::Function *
2428CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2429  LValue CapStruct = InitCapturedStruct(S);
2430
2431  // Emit the CapturedDecl
2432  CodeGenFunction CGF(CGM, true);
2433  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2434  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2435  delete CGF.CapturedStmtInfo;
2436
2437  // Emit call to the helper function.
2438  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2439
2440  return F;
2441}
2442
2443Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2444  LValue CapStruct = InitCapturedStruct(S);
2445  return CapStruct.getAddress(*this);
2446}
2447
2448/// Creates the outlined function for a CapturedStmt.
2449llvm::Function *
2450CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2451  assert(CapturedStmtInfo &&
2452    "CapturedStmtInfo should be set when generating the captured function");
2453  const CapturedDecl *CD = S.getCapturedDecl();
2454  const RecordDecl *RD = S.getCapturedRecordDecl();
2455  SourceLocation Loc = S.getBeginLoc();
2456  assert(CD->hasBody() && "missing CapturedDecl body");
2457
2458  // Build the argument list.
2459  ASTContext &Ctx = CGM.getContext();
2460  FunctionArgList Args;
2461  Args.append(CD->param_begin(), CD->param_end());
2462
2463  // Create the function declaration.
2464  const CGFunctionInfo &FuncInfo =
2465    CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2466  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2467
2468  llvm::Function *F =
2469    llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2470                           CapturedStmtInfo->getHelperName(), &CGM.getModule());
2471  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2472  if (CD->isNothrow())
2473    F->addFnAttr(llvm::Attribute::NoUnwind);
2474
2475  // Generate the function.
2476  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2477                CD->getBody()->getBeginLoc());
2478  // Set the context parameter in CapturedStmtInfo.
2479  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2480  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2481
2482  // Initialize variable-length arrays.
2483  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2484                                           Ctx.getTagDeclType(RD));
2485  for (auto *FD : RD->fields()) {
2486    if (FD->hasCapturedVLAType()) {
2487      auto *ExprArg =
2488          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2489              .getScalarVal();
2490      auto VAT = FD->getCapturedVLAType();
2491      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2492    }
2493  }
2494
2495  // If 'this' is captured, load it into CXXThisValue.
2496  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2497    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2498    LValue ThisLValue = EmitLValueForField(Base, FD);
2499    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2500  }
2501
2502  PGO.assignRegionCounters(GlobalDecl(CD), F);
2503  CapturedStmtInfo->EmitBody(*this, CD->getBody());
2504  FinishFunction(CD->getBodyRBrace());
2505
2506  return F;
2507}
2508