1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CodeGenFunction.h"
19#include "CodeGenModule.h"
20#include "ConstantEmitter.h"
21#include "TargetInfo.h"
22#include "clang/AST/ASTContext.h"
23#include "clang/AST/Attr.h"
24#include "clang/AST/DeclObjC.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/RecordLayout.h"
27#include "clang/AST/StmtVisitor.h"
28#include "clang/Basic/CodeGenOptions.h"
29#include "clang/Basic/TargetInfo.h"
30#include "llvm/ADT/APFixedPoint.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DataLayout.h"
34#include "llvm/IR/DerivedTypes.h"
35#include "llvm/IR/FixedPointBuilder.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GetElementPtrTypeIterator.h"
38#include "llvm/IR/GlobalVariable.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/IntrinsicsPowerPC.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/IR/Module.h"
43#include "llvm/Support/TypeSize.h"
44#include <cstdarg>
45#include <optional>
46
47using namespace clang;
48using namespace CodeGen;
49using llvm::Value;
50
51//===----------------------------------------------------------------------===//
52//                         Scalar Expression Emitter
53//===----------------------------------------------------------------------===//
54
55namespace {
56
57/// Determine whether the given binary operation may overflow.
58/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
59/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
60/// the returned overflow check is precise. The returned value is 'true' for
61/// all other opcodes, to be conservative.
62bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
63                             BinaryOperator::Opcode Opcode, bool Signed,
64                             llvm::APInt &Result) {
65  // Assume overflow is possible, unless we can prove otherwise.
66  bool Overflow = true;
67  const auto &LHSAP = LHS->getValue();
68  const auto &RHSAP = RHS->getValue();
69  if (Opcode == BO_Add) {
70    Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
71                    : LHSAP.uadd_ov(RHSAP, Overflow);
72  } else if (Opcode == BO_Sub) {
73    Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
74                    : LHSAP.usub_ov(RHSAP, Overflow);
75  } else if (Opcode == BO_Mul) {
76    Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
77                    : LHSAP.umul_ov(RHSAP, Overflow);
78  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
79    if (Signed && !RHS->isZero())
80      Result = LHSAP.sdiv_ov(RHSAP, Overflow);
81    else
82      return false;
83  }
84  return Overflow;
85}
86
87struct BinOpInfo {
88  Value *LHS;
89  Value *RHS;
90  QualType Ty;  // Computation Type.
91  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
92  FPOptions FPFeatures;
93  const Expr *E;      // Entire expr, for error unsupported.  May not be binop.
94
95  /// Check if the binop can result in integer overflow.
96  bool mayHaveIntegerOverflow() const {
97    // Without constant input, we can't rule out overflow.
98    auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
99    auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
100    if (!LHSCI || !RHSCI)
101      return true;
102
103    llvm::APInt Result;
104    return ::mayHaveIntegerOverflow(
105        LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
106  }
107
108  /// Check if the binop computes a division or a remainder.
109  bool isDivremOp() const {
110    return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
111           Opcode == BO_RemAssign;
112  }
113
114  /// Check if the binop can result in an integer division by zero.
115  bool mayHaveIntegerDivisionByZero() const {
116    if (isDivremOp())
117      if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
118        return CI->isZero();
119    return true;
120  }
121
122  /// Check if the binop can result in a float division by zero.
123  bool mayHaveFloatDivisionByZero() const {
124    if (isDivremOp())
125      if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
126        return CFP->isZero();
127    return true;
128  }
129
130  /// Check if at least one operand is a fixed point type. In such cases, this
131  /// operation did not follow usual arithmetic conversion and both operands
132  /// might not be of the same type.
133  bool isFixedPointOp() const {
134    // We cannot simply check the result type since comparison operations return
135    // an int.
136    if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
137      QualType LHSType = BinOp->getLHS()->getType();
138      QualType RHSType = BinOp->getRHS()->getType();
139      return LHSType->isFixedPointType() || RHSType->isFixedPointType();
140    }
141    if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
142      return UnOp->getSubExpr()->getType()->isFixedPointType();
143    return false;
144  }
145};
146
147static bool MustVisitNullValue(const Expr *E) {
148  // If a null pointer expression's type is the C++0x nullptr_t, then
149  // it's not necessarily a simple constant and it must be evaluated
150  // for its potential side effects.
151  return E->getType()->isNullPtrType();
152}
153
154/// If \p E is a widened promoted integer, get its base (unpromoted) type.
155static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
156                                                       const Expr *E) {
157  const Expr *Base = E->IgnoreImpCasts();
158  if (E == Base)
159    return std::nullopt;
160
161  QualType BaseTy = Base->getType();
162  if (!Ctx.isPromotableIntegerType(BaseTy) ||
163      Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
164    return std::nullopt;
165
166  return BaseTy;
167}
168
169/// Check if \p E is a widened promoted integer.
170static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
171  return getUnwidenedIntegerType(Ctx, E).has_value();
172}
173
174/// Check if we can skip the overflow check for \p Op.
175static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
176  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
177         "Expected a unary or binary operator");
178
179  // If the binop has constant inputs and we can prove there is no overflow,
180  // we can elide the overflow check.
181  if (!Op.mayHaveIntegerOverflow())
182    return true;
183
184  // If a unary op has a widened operand, the op cannot overflow.
185  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
186    return !UO->canOverflow();
187
188  // We usually don't need overflow checks for binops with widened operands.
189  // Multiplication with promoted unsigned operands is a special case.
190  const auto *BO = cast<BinaryOperator>(Op.E);
191  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
192  if (!OptionalLHSTy)
193    return false;
194
195  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
196  if (!OptionalRHSTy)
197    return false;
198
199  QualType LHSTy = *OptionalLHSTy;
200  QualType RHSTy = *OptionalRHSTy;
201
202  // This is the simple case: binops without unsigned multiplication, and with
203  // widened operands. No overflow check is needed here.
204  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
205      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
206    return true;
207
208  // For unsigned multiplication the overflow check can be elided if either one
209  // of the unpromoted types are less than half the size of the promoted type.
210  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
211  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
212         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
213}
214
215class ScalarExprEmitter
216  : public StmtVisitor<ScalarExprEmitter, Value*> {
217  CodeGenFunction &CGF;
218  CGBuilderTy &Builder;
219  bool IgnoreResultAssign;
220  llvm::LLVMContext &VMContext;
221public:
222
223  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
224    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
225      VMContext(cgf.getLLVMContext()) {
226  }
227
228  //===--------------------------------------------------------------------===//
229  //                               Utilities
230  //===--------------------------------------------------------------------===//
231
232  bool TestAndClearIgnoreResultAssign() {
233    bool I = IgnoreResultAssign;
234    IgnoreResultAssign = false;
235    return I;
236  }
237
238  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
239  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
240  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
241    return CGF.EmitCheckedLValue(E, TCK);
242  }
243
244  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
245                      const BinOpInfo &Info);
246
247  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
248    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
249  }
250
251  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
252    const AlignValueAttr *AVAttr = nullptr;
253    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
254      const ValueDecl *VD = DRE->getDecl();
255
256      if (VD->getType()->isReferenceType()) {
257        if (const auto *TTy =
258                VD->getType().getNonReferenceType()->getAs<TypedefType>())
259          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
260      } else {
261        // Assumptions for function parameters are emitted at the start of the
262        // function, so there is no need to repeat that here,
263        // unless the alignment-assumption sanitizer is enabled,
264        // then we prefer the assumption over alignment attribute
265        // on IR function param.
266        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
267          return;
268
269        AVAttr = VD->getAttr<AlignValueAttr>();
270      }
271    }
272
273    if (!AVAttr)
274      if (const auto *TTy = E->getType()->getAs<TypedefType>())
275        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
276
277    if (!AVAttr)
278      return;
279
280    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
281    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
282    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
283  }
284
285  /// EmitLoadOfLValue - Given an expression with complex type that represents a
286  /// value l-value, this method emits the address of the l-value, then loads
287  /// and returns the result.
288  Value *EmitLoadOfLValue(const Expr *E) {
289    Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
290                                E->getExprLoc());
291
292    EmitLValueAlignmentAssumption(E, V);
293    return V;
294  }
295
296  /// EmitConversionToBool - Convert the specified expression value to a
297  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
298  Value *EmitConversionToBool(Value *Src, QualType DstTy);
299
300  /// Emit a check that a conversion from a floating-point type does not
301  /// overflow.
302  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
303                                Value *Src, QualType SrcType, QualType DstType,
304                                llvm::Type *DstTy, SourceLocation Loc);
305
306  /// Known implicit conversion check kinds.
307  /// Keep in sync with the enum of the same name in ubsan_handlers.h
308  enum ImplicitConversionCheckKind : unsigned char {
309    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
310    ICCK_UnsignedIntegerTruncation = 1,
311    ICCK_SignedIntegerTruncation = 2,
312    ICCK_IntegerSignChange = 3,
313    ICCK_SignedIntegerTruncationOrSignChange = 4,
314  };
315
316  /// Emit a check that an [implicit] truncation of an integer  does not
317  /// discard any bits. It is not UB, so we use the value after truncation.
318  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
319                                  QualType DstType, SourceLocation Loc);
320
321  /// Emit a check that an [implicit] conversion of an integer does not change
322  /// the sign of the value. It is not UB, so we use the value after conversion.
323  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
324  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
325                                  QualType DstType, SourceLocation Loc);
326
327  /// Emit a conversion from the specified type to the specified destination
328  /// type, both of which are LLVM scalar types.
329  struct ScalarConversionOpts {
330    bool TreatBooleanAsSigned;
331    bool EmitImplicitIntegerTruncationChecks;
332    bool EmitImplicitIntegerSignChangeChecks;
333
334    ScalarConversionOpts()
335        : TreatBooleanAsSigned(false),
336          EmitImplicitIntegerTruncationChecks(false),
337          EmitImplicitIntegerSignChangeChecks(false) {}
338
339    ScalarConversionOpts(clang::SanitizerSet SanOpts)
340        : TreatBooleanAsSigned(false),
341          EmitImplicitIntegerTruncationChecks(
342              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
343          EmitImplicitIntegerSignChangeChecks(
344              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
345  };
346  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
347                        llvm::Type *SrcTy, llvm::Type *DstTy,
348                        ScalarConversionOpts Opts);
349  Value *
350  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
351                       SourceLocation Loc,
352                       ScalarConversionOpts Opts = ScalarConversionOpts());
353
354  /// Convert between either a fixed point and other fixed point or fixed point
355  /// and an integer.
356  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
357                                  SourceLocation Loc);
358
359  /// Emit a conversion from the specified complex type to the specified
360  /// destination type, where the destination type is an LLVM scalar type.
361  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
362                                       QualType SrcTy, QualType DstTy,
363                                       SourceLocation Loc);
364
365  /// EmitNullValue - Emit a value that corresponds to null for the given type.
366  Value *EmitNullValue(QualType Ty);
367
368  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
369  Value *EmitFloatToBoolConversion(Value *V) {
370    // Compare against 0.0 for fp scalars.
371    llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
372    return Builder.CreateFCmpUNE(V, Zero, "tobool");
373  }
374
375  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
376  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
377    Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
378
379    return Builder.CreateICmpNE(V, Zero, "tobool");
380  }
381
382  Value *EmitIntToBoolConversion(Value *V) {
383    // Because of the type rules of C, we often end up computing a
384    // logical value, then zero extending it to int, then wanting it
385    // as a logical value again.  Optimize this common case.
386    if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
387      if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
388        Value *Result = ZI->getOperand(0);
389        // If there aren't any more uses, zap the instruction to save space.
390        // Note that there can be more uses, for example if this
391        // is the result of an assignment.
392        if (ZI->use_empty())
393          ZI->eraseFromParent();
394        return Result;
395      }
396    }
397
398    return Builder.CreateIsNotNull(V, "tobool");
399  }
400
401  //===--------------------------------------------------------------------===//
402  //                            Visitor Methods
403  //===--------------------------------------------------------------------===//
404
405  Value *Visit(Expr *E) {
406    ApplyDebugLocation DL(CGF, E);
407    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
408  }
409
410  Value *VisitStmt(Stmt *S) {
411    S->dump(llvm::errs(), CGF.getContext());
412    llvm_unreachable("Stmt can't have complex result type!");
413  }
414  Value *VisitExpr(Expr *S);
415
416  Value *VisitConstantExpr(ConstantExpr *E) {
417    // A constant expression of type 'void' generates no code and produces no
418    // value.
419    if (E->getType()->isVoidType())
420      return nullptr;
421
422    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
423      if (E->isGLValue())
424        return CGF.Builder.CreateLoad(Address(
425            Result, CGF.ConvertTypeForMem(E->getType()),
426            CGF.getContext().getTypeAlignInChars(E->getType())));
427      return Result;
428    }
429    return Visit(E->getSubExpr());
430  }
431  Value *VisitParenExpr(ParenExpr *PE) {
432    return Visit(PE->getSubExpr());
433  }
434  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
435    return Visit(E->getReplacement());
436  }
437  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
438    return Visit(GE->getResultExpr());
439  }
440  Value *VisitCoawaitExpr(CoawaitExpr *S) {
441    return CGF.EmitCoawaitExpr(*S).getScalarVal();
442  }
443  Value *VisitCoyieldExpr(CoyieldExpr *S) {
444    return CGF.EmitCoyieldExpr(*S).getScalarVal();
445  }
446  Value *VisitUnaryCoawait(const UnaryOperator *E) {
447    return Visit(E->getSubExpr());
448  }
449
450  // Leaves.
451  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
452    return Builder.getInt(E->getValue());
453  }
454  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
455    return Builder.getInt(E->getValue());
456  }
457  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
458    return llvm::ConstantFP::get(VMContext, E->getValue());
459  }
460  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
461    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
462  }
463  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
464    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
465  }
466  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
467    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
468  }
469  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
470    if (E->getType()->isVoidType())
471      return nullptr;
472
473    return EmitNullValue(E->getType());
474  }
475  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
476    return EmitNullValue(E->getType());
477  }
478  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
479  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
480  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
481    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
482    return Builder.CreateBitCast(V, ConvertType(E->getType()));
483  }
484
485  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
486    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
487  }
488
489  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
490    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
491  }
492
493  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
494
495  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
496    if (E->isGLValue())
497      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
498                              E->getExprLoc());
499
500    // Otherwise, assume the mapping is the scalar directly.
501    return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
502  }
503
504  // l-values.
505  Value *VisitDeclRefExpr(DeclRefExpr *E) {
506    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
507      return CGF.emitScalarConstant(Constant, E);
508    return EmitLoadOfLValue(E);
509  }
510
511  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
512    return CGF.EmitObjCSelectorExpr(E);
513  }
514  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
515    return CGF.EmitObjCProtocolExpr(E);
516  }
517  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
518    return EmitLoadOfLValue(E);
519  }
520  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
521    if (E->getMethodDecl() &&
522        E->getMethodDecl()->getReturnType()->isReferenceType())
523      return EmitLoadOfLValue(E);
524    return CGF.EmitObjCMessageExpr(E).getScalarVal();
525  }
526
527  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
528    LValue LV = CGF.EmitObjCIsaExpr(E);
529    Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
530    return V;
531  }
532
533  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
534    VersionTuple Version = E->getVersion();
535
536    // If we're checking for a platform older than our minimum deployment
537    // target, we can fold the check away.
538    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
539      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
540
541    return CGF.EmitBuiltinAvailable(Version);
542  }
543
544  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
545  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
546  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
547  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
548  Value *VisitMemberExpr(MemberExpr *E);
549  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
550  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
551    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
552    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
553    // literals aren't l-values in C++. We do so simply because that's the
554    // cleanest way to handle compound literals in C++.
555    // See the discussion here: https://reviews.llvm.org/D64464
556    return EmitLoadOfLValue(E);
557  }
558
559  Value *VisitInitListExpr(InitListExpr *E);
560
561  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
562    assert(CGF.getArrayInitIndex() &&
563           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
564    return CGF.getArrayInitIndex();
565  }
566
567  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
568    return EmitNullValue(E->getType());
569  }
570  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
571    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
572    return VisitCastExpr(E);
573  }
574  Value *VisitCastExpr(CastExpr *E);
575
576  Value *VisitCallExpr(const CallExpr *E) {
577    if (E->getCallReturnType(CGF.getContext())->isReferenceType())
578      return EmitLoadOfLValue(E);
579
580    Value *V = CGF.EmitCallExpr(E).getScalarVal();
581
582    EmitLValueAlignmentAssumption(E, V);
583    return V;
584  }
585
586  Value *VisitStmtExpr(const StmtExpr *E);
587
588  // Unary Operators.
589  Value *VisitUnaryPostDec(const UnaryOperator *E) {
590    LValue LV = EmitLValue(E->getSubExpr());
591    return EmitScalarPrePostIncDec(E, LV, false, false);
592  }
593  Value *VisitUnaryPostInc(const UnaryOperator *E) {
594    LValue LV = EmitLValue(E->getSubExpr());
595    return EmitScalarPrePostIncDec(E, LV, true, false);
596  }
597  Value *VisitUnaryPreDec(const UnaryOperator *E) {
598    LValue LV = EmitLValue(E->getSubExpr());
599    return EmitScalarPrePostIncDec(E, LV, false, true);
600  }
601  Value *VisitUnaryPreInc(const UnaryOperator *E) {
602    LValue LV = EmitLValue(E->getSubExpr());
603    return EmitScalarPrePostIncDec(E, LV, true, true);
604  }
605
606  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
607                                                  llvm::Value *InVal,
608                                                  bool IsInc);
609
610  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
611                                       bool isInc, bool isPre);
612
613
614  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
615    if (isa<MemberPointerType>(E->getType())) // never sugared
616      return CGF.CGM.getMemberPointerConstant(E);
617
618    return EmitLValue(E->getSubExpr()).getPointer(CGF);
619  }
620  Value *VisitUnaryDeref(const UnaryOperator *E) {
621    if (E->getType()->isVoidType())
622      return Visit(E->getSubExpr()); // the actual value should be unused
623    return EmitLoadOfLValue(E);
624  }
625
626  Value *VisitUnaryPlus(const UnaryOperator *E,
627                        QualType PromotionType = QualType());
628  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
629  Value *VisitUnaryMinus(const UnaryOperator *E,
630                         QualType PromotionType = QualType());
631  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
632
633  Value *VisitUnaryNot      (const UnaryOperator *E);
634  Value *VisitUnaryLNot     (const UnaryOperator *E);
635  Value *VisitUnaryReal(const UnaryOperator *E,
636                        QualType PromotionType = QualType());
637  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
638  Value *VisitUnaryImag(const UnaryOperator *E,
639                        QualType PromotionType = QualType());
640  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
641  Value *VisitUnaryExtension(const UnaryOperator *E) {
642    return Visit(E->getSubExpr());
643  }
644
645  // C++
646  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
647    return EmitLoadOfLValue(E);
648  }
649  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
650    auto &Ctx = CGF.getContext();
651    APValue Evaluated =
652        SLE->EvaluateInContext(Ctx, CGF.CurSourceLocExprScope.getDefaultExpr());
653    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
654                                             SLE->getType());
655  }
656
657  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
658    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
659    return Visit(DAE->getExpr());
660  }
661  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
662    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
663    return Visit(DIE->getExpr());
664  }
665  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
666    return CGF.LoadCXXThis();
667  }
668
669  Value *VisitExprWithCleanups(ExprWithCleanups *E);
670  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
671    return CGF.EmitCXXNewExpr(E);
672  }
673  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
674    CGF.EmitCXXDeleteExpr(E);
675    return nullptr;
676  }
677
678  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
679    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
680  }
681
682  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
683    return Builder.getInt1(E->isSatisfied());
684  }
685
686  Value *VisitRequiresExpr(const RequiresExpr *E) {
687    return Builder.getInt1(E->isSatisfied());
688  }
689
690  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
691    return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
692  }
693
694  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
695    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
696  }
697
698  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
699    // C++ [expr.pseudo]p1:
700    //   The result shall only be used as the operand for the function call
701    //   operator (), and the result of such a call has type void. The only
702    //   effect is the evaluation of the postfix-expression before the dot or
703    //   arrow.
704    CGF.EmitScalarExpr(E->getBase());
705    return nullptr;
706  }
707
708  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
709    return EmitNullValue(E->getType());
710  }
711
712  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
713    CGF.EmitCXXThrowExpr(E);
714    return nullptr;
715  }
716
717  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
718    return Builder.getInt1(E->getValue());
719  }
720
721  // Binary Operators.
722  Value *EmitMul(const BinOpInfo &Ops) {
723    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
724      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
725      case LangOptions::SOB_Defined:
726        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
727      case LangOptions::SOB_Undefined:
728        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
729          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
730        [[fallthrough]];
731      case LangOptions::SOB_Trapping:
732        if (CanElideOverflowCheck(CGF.getContext(), Ops))
733          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
734        return EmitOverflowCheckedBinOp(Ops);
735      }
736    }
737
738    if (Ops.Ty->isConstantMatrixType()) {
739      llvm::MatrixBuilder MB(Builder);
740      // We need to check the types of the operands of the operator to get the
741      // correct matrix dimensions.
742      auto *BO = cast<BinaryOperator>(Ops.E);
743      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
744          BO->getLHS()->getType().getCanonicalType());
745      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
746          BO->getRHS()->getType().getCanonicalType());
747      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
748      if (LHSMatTy && RHSMatTy)
749        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
750                                       LHSMatTy->getNumColumns(),
751                                       RHSMatTy->getNumColumns());
752      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
753    }
754
755    if (Ops.Ty->isUnsignedIntegerType() &&
756        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
757        !CanElideOverflowCheck(CGF.getContext(), Ops))
758      return EmitOverflowCheckedBinOp(Ops);
759
760    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
761      //  Preserve the old values
762      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
763      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
764    }
765    if (Ops.isFixedPointOp())
766      return EmitFixedPointBinOp(Ops);
767    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
768  }
769  /// Create a binary op that checks for overflow.
770  /// Currently only supports +, - and *.
771  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
772
773  // Check for undefined division and modulus behaviors.
774  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
775                                                  llvm::Value *Zero,bool isDiv);
776  // Common helper for getting how wide LHS of shift is.
777  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
778
779  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
780  // non powers of two.
781  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
782
783  Value *EmitDiv(const BinOpInfo &Ops);
784  Value *EmitRem(const BinOpInfo &Ops);
785  Value *EmitAdd(const BinOpInfo &Ops);
786  Value *EmitSub(const BinOpInfo &Ops);
787  Value *EmitShl(const BinOpInfo &Ops);
788  Value *EmitShr(const BinOpInfo &Ops);
789  Value *EmitAnd(const BinOpInfo &Ops) {
790    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
791  }
792  Value *EmitXor(const BinOpInfo &Ops) {
793    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
794  }
795  Value *EmitOr (const BinOpInfo &Ops) {
796    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
797  }
798
799  // Helper functions for fixed point binary operations.
800  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
801
802  BinOpInfo EmitBinOps(const BinaryOperator *E,
803                       QualType PromotionTy = QualType());
804
805  Value *EmitPromotedValue(Value *result, QualType PromotionType);
806  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
807  Value *EmitPromoted(const Expr *E, QualType PromotionType);
808
809  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
810                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
811                                  Value *&Result);
812
813  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
814                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
815
816  QualType getPromotionType(QualType Ty) {
817    const auto &Ctx = CGF.getContext();
818    if (auto *CT = Ty->getAs<ComplexType>()) {
819      QualType ElementType = CT->getElementType();
820      if (ElementType.UseExcessPrecision(Ctx))
821        return Ctx.getComplexType(Ctx.FloatTy);
822    }
823
824    if (Ty.UseExcessPrecision(Ctx)) {
825      if (auto *VT = Ty->getAs<VectorType>()) {
826        unsigned NumElements = VT->getNumElements();
827        return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
828      }
829      return Ctx.FloatTy;
830    }
831
832    return QualType();
833  }
834
835  // Binary operators and binary compound assignment operators.
836#define HANDLEBINOP(OP)                                                        \
837  Value *VisitBin##OP(const BinaryOperator *E) {                               \
838    QualType promotionTy = getPromotionType(E->getType());                     \
839    auto result = Emit##OP(EmitBinOps(E, promotionTy));                        \
840    if (result && !promotionTy.isNull())                                       \
841      result = EmitUnPromotedValue(result, E->getType());                      \
842    return result;                                                             \
843  }                                                                            \
844  Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) {               \
845    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP);                \
846  }
847  HANDLEBINOP(Mul)
848  HANDLEBINOP(Div)
849  HANDLEBINOP(Rem)
850  HANDLEBINOP(Add)
851  HANDLEBINOP(Sub)
852  HANDLEBINOP(Shl)
853  HANDLEBINOP(Shr)
854  HANDLEBINOP(And)
855  HANDLEBINOP(Xor)
856  HANDLEBINOP(Or)
857#undef HANDLEBINOP
858
859  // Comparisons.
860  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
861                     llvm::CmpInst::Predicate SICmpOpc,
862                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
863#define VISITCOMP(CODE, UI, SI, FP, SIG) \
864    Value *VisitBin##CODE(const BinaryOperator *E) { \
865      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
866                         llvm::FCmpInst::FP, SIG); }
867  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
868  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
869  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
870  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
871  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
872  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
873#undef VISITCOMP
874
875  Value *VisitBinAssign     (const BinaryOperator *E);
876
877  Value *VisitBinLAnd       (const BinaryOperator *E);
878  Value *VisitBinLOr        (const BinaryOperator *E);
879  Value *VisitBinComma      (const BinaryOperator *E);
880
881  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
882  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
883
884  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
885    return Visit(E->getSemanticForm());
886  }
887
888  // Other Operators.
889  Value *VisitBlockExpr(const BlockExpr *BE);
890  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
891  Value *VisitChooseExpr(ChooseExpr *CE);
892  Value *VisitVAArgExpr(VAArgExpr *VE);
893  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
894    return CGF.EmitObjCStringLiteral(E);
895  }
896  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
897    return CGF.EmitObjCBoxedExpr(E);
898  }
899  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
900    return CGF.EmitObjCArrayLiteral(E);
901  }
902  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
903    return CGF.EmitObjCDictionaryLiteral(E);
904  }
905  Value *VisitAsTypeExpr(AsTypeExpr *CE);
906  Value *VisitAtomicExpr(AtomicExpr *AE);
907};
908}  // end anonymous namespace.
909
910//===----------------------------------------------------------------------===//
911//                                Utilities
912//===----------------------------------------------------------------------===//
913
914/// EmitConversionToBool - Convert the specified expression value to a
915/// boolean (i1) truth value.  This is equivalent to "Val != 0".
916Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
917  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
918
919  if (SrcType->isRealFloatingType())
920    return EmitFloatToBoolConversion(Src);
921
922  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
923    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
924
925  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
926         "Unknown scalar type to convert");
927
928  if (isa<llvm::IntegerType>(Src->getType()))
929    return EmitIntToBoolConversion(Src);
930
931  assert(isa<llvm::PointerType>(Src->getType()));
932  return EmitPointerToBoolConversion(Src, SrcType);
933}
934
935void ScalarExprEmitter::EmitFloatConversionCheck(
936    Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
937    QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
938  assert(SrcType->isFloatingType() && "not a conversion from floating point");
939  if (!isa<llvm::IntegerType>(DstTy))
940    return;
941
942  CodeGenFunction::SanitizerScope SanScope(&CGF);
943  using llvm::APFloat;
944  using llvm::APSInt;
945
946  llvm::Value *Check = nullptr;
947  const llvm::fltSemantics &SrcSema =
948    CGF.getContext().getFloatTypeSemantics(OrigSrcType);
949
950  // Floating-point to integer. This has undefined behavior if the source is
951  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
952  // to an integer).
953  unsigned Width = CGF.getContext().getIntWidth(DstType);
954  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
955
956  APSInt Min = APSInt::getMinValue(Width, Unsigned);
957  APFloat MinSrc(SrcSema, APFloat::uninitialized);
958  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
959      APFloat::opOverflow)
960    // Don't need an overflow check for lower bound. Just check for
961    // -Inf/NaN.
962    MinSrc = APFloat::getInf(SrcSema, true);
963  else
964    // Find the largest value which is too small to represent (before
965    // truncation toward zero).
966    MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
967
968  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
969  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
970  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
971      APFloat::opOverflow)
972    // Don't need an overflow check for upper bound. Just check for
973    // +Inf/NaN.
974    MaxSrc = APFloat::getInf(SrcSema, false);
975  else
976    // Find the smallest value which is too large to represent (before
977    // truncation toward zero).
978    MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
979
980  // If we're converting from __half, convert the range to float to match
981  // the type of src.
982  if (OrigSrcType->isHalfType()) {
983    const llvm::fltSemantics &Sema =
984      CGF.getContext().getFloatTypeSemantics(SrcType);
985    bool IsInexact;
986    MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
987    MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
988  }
989
990  llvm::Value *GE =
991    Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
992  llvm::Value *LE =
993    Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
994  Check = Builder.CreateAnd(GE, LE);
995
996  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
997                                  CGF.EmitCheckTypeDescriptor(OrigSrcType),
998                                  CGF.EmitCheckTypeDescriptor(DstType)};
999  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
1000                SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
1001}
1002
1003// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1004// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1005static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1006                 std::pair<llvm::Value *, SanitizerMask>>
1007EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1008                                 QualType DstType, CGBuilderTy &Builder) {
1009  llvm::Type *SrcTy = Src->getType();
1010  llvm::Type *DstTy = Dst->getType();
1011  (void)DstTy; // Only used in assert()
1012
1013  // This should be truncation of integral types.
1014  assert(Src != Dst);
1015  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1016  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1017         "non-integer llvm type");
1018
1019  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1020  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1021
1022  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1023  // Else, it is a signed truncation.
1024  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1025  SanitizerMask Mask;
1026  if (!SrcSigned && !DstSigned) {
1027    Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1028    Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
1029  } else {
1030    Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1031    Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
1032  }
1033
1034  llvm::Value *Check = nullptr;
1035  // 1. Extend the truncated value back to the same width as the Src.
1036  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1037  // 2. Equality-compare with the original source value
1038  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1039  // If the comparison result is 'i1 false', then the truncation was lossy.
1040  return std::make_pair(Kind, std::make_pair(Check, Mask));
1041}
1042
1043static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1044    QualType SrcType, QualType DstType) {
1045  return SrcType->isIntegerType() && DstType->isIntegerType();
1046}
1047
1048void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1049                                                   Value *Dst, QualType DstType,
1050                                                   SourceLocation Loc) {
1051  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1052    return;
1053
1054  // We only care about int->int conversions here.
1055  // We ignore conversions to/from pointer and/or bool.
1056  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1057                                                                       DstType))
1058    return;
1059
1060  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1061  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1062  // This must be truncation. Else we do not care.
1063  if (SrcBits <= DstBits)
1064    return;
1065
1066  assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1067
1068  // If the integer sign change sanitizer is enabled,
1069  // and we are truncating from larger unsigned type to smaller signed type,
1070  // let that next sanitizer deal with it.
1071  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1072  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1073  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1074      (!SrcSigned && DstSigned))
1075    return;
1076
1077  CodeGenFunction::SanitizerScope SanScope(&CGF);
1078
1079  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1080            std::pair<llvm::Value *, SanitizerMask>>
1081      Check =
1082          EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1083  // If the comparison result is 'i1 false', then the truncation was lossy.
1084
1085  // Do we care about this type of truncation?
1086  if (!CGF.SanOpts.has(Check.second.second))
1087    return;
1088
1089  llvm::Constant *StaticArgs[] = {
1090      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1091      CGF.EmitCheckTypeDescriptor(DstType),
1092      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1093  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1094                {Src, Dst});
1095}
1096
1097// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1098// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1099static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1100                 std::pair<llvm::Value *, SanitizerMask>>
1101EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1102                                 QualType DstType, CGBuilderTy &Builder) {
1103  llvm::Type *SrcTy = Src->getType();
1104  llvm::Type *DstTy = Dst->getType();
1105
1106  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1107         "non-integer llvm type");
1108
1109  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1110  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1111  (void)SrcSigned; // Only used in assert()
1112  (void)DstSigned; // Only used in assert()
1113  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1114  unsigned DstBits = DstTy->getScalarSizeInBits();
1115  (void)SrcBits; // Only used in assert()
1116  (void)DstBits; // Only used in assert()
1117
1118  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1119         "either the widths should be different, or the signednesses.");
1120
1121  // NOTE: zero value is considered to be non-negative.
1122  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1123                                       const char *Name) -> Value * {
1124    // Is this value a signed type?
1125    bool VSigned = VType->isSignedIntegerOrEnumerationType();
1126    llvm::Type *VTy = V->getType();
1127    if (!VSigned) {
1128      // If the value is unsigned, then it is never negative.
1129      // FIXME: can we encounter non-scalar VTy here?
1130      return llvm::ConstantInt::getFalse(VTy->getContext());
1131    }
1132    // Get the zero of the same type with which we will be comparing.
1133    llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1134    // %V.isnegative = icmp slt %V, 0
1135    // I.e is %V *strictly* less than zero, does it have negative value?
1136    return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1137                              llvm::Twine(Name) + "." + V->getName() +
1138                                  ".negativitycheck");
1139  };
1140
1141  // 1. Was the old Value negative?
1142  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1143  // 2. Is the new Value negative?
1144  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1145  // 3. Now, was the 'negativity status' preserved during the conversion?
1146  //    NOTE: conversion from negative to zero is considered to change the sign.
1147  //    (We want to get 'false' when the conversion changed the sign)
1148  //    So we should just equality-compare the negativity statuses.
1149  llvm::Value *Check = nullptr;
1150  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1151  // If the comparison result is 'false', then the conversion changed the sign.
1152  return std::make_pair(
1153      ScalarExprEmitter::ICCK_IntegerSignChange,
1154      std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1155}
1156
1157void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1158                                                   Value *Dst, QualType DstType,
1159                                                   SourceLocation Loc) {
1160  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1161    return;
1162
1163  llvm::Type *SrcTy = Src->getType();
1164  llvm::Type *DstTy = Dst->getType();
1165
1166  // We only care about int->int conversions here.
1167  // We ignore conversions to/from pointer and/or bool.
1168  if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1169                                                                       DstType))
1170    return;
1171
1172  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1173  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1174  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1175  unsigned DstBits = DstTy->getScalarSizeInBits();
1176
1177  // Now, we do not need to emit the check in *all* of the cases.
1178  // We can avoid emitting it in some obvious cases where it would have been
1179  // dropped by the opt passes (instcombine) always anyways.
1180  // If it's a cast between effectively the same type, no check.
1181  // NOTE: this is *not* equivalent to checking the canonical types.
1182  if (SrcSigned == DstSigned && SrcBits == DstBits)
1183    return;
1184  // At least one of the values needs to have signed type.
1185  // If both are unsigned, then obviously, neither of them can be negative.
1186  if (!SrcSigned && !DstSigned)
1187    return;
1188  // If the conversion is to *larger* *signed* type, then no check is needed.
1189  // Because either sign-extension happens (so the sign will remain),
1190  // or zero-extension will happen (the sign bit will be zero.)
1191  if ((DstBits > SrcBits) && DstSigned)
1192    return;
1193  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1194      (SrcBits > DstBits) && SrcSigned) {
1195    // If the signed integer truncation sanitizer is enabled,
1196    // and this is a truncation from signed type, then no check is needed.
1197    // Because here sign change check is interchangeable with truncation check.
1198    return;
1199  }
1200  // That's it. We can't rule out any more cases with the data we have.
1201
1202  CodeGenFunction::SanitizerScope SanScope(&CGF);
1203
1204  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1205            std::pair<llvm::Value *, SanitizerMask>>
1206      Check;
1207
1208  // Each of these checks needs to return 'false' when an issue was detected.
1209  ImplicitConversionCheckKind CheckKind;
1210  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1211  // So we can 'and' all the checks together, and still get 'false',
1212  // if at least one of the checks detected an issue.
1213
1214  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1215  CheckKind = Check.first;
1216  Checks.emplace_back(Check.second);
1217
1218  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1219      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1220    // If the signed integer truncation sanitizer was enabled,
1221    // and we are truncating from larger unsigned type to smaller signed type,
1222    // let's handle the case we skipped in that check.
1223    Check =
1224        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1225    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1226    Checks.emplace_back(Check.second);
1227    // If the comparison result is 'i1 false', then the truncation was lossy.
1228  }
1229
1230  llvm::Constant *StaticArgs[] = {
1231      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1232      CGF.EmitCheckTypeDescriptor(DstType),
1233      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1234  // EmitCheck() will 'and' all the checks together.
1235  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1236                {Src, Dst});
1237}
1238
1239Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1240                                         QualType DstType, llvm::Type *SrcTy,
1241                                         llvm::Type *DstTy,
1242                                         ScalarConversionOpts Opts) {
1243  // The Element types determine the type of cast to perform.
1244  llvm::Type *SrcElementTy;
1245  llvm::Type *DstElementTy;
1246  QualType SrcElementType;
1247  QualType DstElementType;
1248  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1249    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1250    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1251    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1252    DstElementType = DstType->castAs<MatrixType>()->getElementType();
1253  } else {
1254    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1255           "cannot cast between matrix and non-matrix types");
1256    SrcElementTy = SrcTy;
1257    DstElementTy = DstTy;
1258    SrcElementType = SrcType;
1259    DstElementType = DstType;
1260  }
1261
1262  if (isa<llvm::IntegerType>(SrcElementTy)) {
1263    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1264    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1265      InputSigned = true;
1266    }
1267
1268    if (isa<llvm::IntegerType>(DstElementTy))
1269      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1270    if (InputSigned)
1271      return Builder.CreateSIToFP(Src, DstTy, "conv");
1272    return Builder.CreateUIToFP(Src, DstTy, "conv");
1273  }
1274
1275  if (isa<llvm::IntegerType>(DstElementTy)) {
1276    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1277    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1278
1279    // If we can't recognize overflow as undefined behavior, assume that
1280    // overflow saturates. This protects against normal optimizations if we are
1281    // compiling with non-standard FP semantics.
1282    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1283      llvm::Intrinsic::ID IID =
1284          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1285      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1286    }
1287
1288    if (IsSigned)
1289      return Builder.CreateFPToSI(Src, DstTy, "conv");
1290    return Builder.CreateFPToUI(Src, DstTy, "conv");
1291  }
1292
1293  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1294    return Builder.CreateFPTrunc(Src, DstTy, "conv");
1295  return Builder.CreateFPExt(Src, DstTy, "conv");
1296}
1297
1298/// Emit a conversion from the specified type to the specified destination type,
1299/// both of which are LLVM scalar types.
1300Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1301                                               QualType DstType,
1302                                               SourceLocation Loc,
1303                                               ScalarConversionOpts Opts) {
1304  // All conversions involving fixed point types should be handled by the
1305  // EmitFixedPoint family functions. This is done to prevent bloating up this
1306  // function more, and although fixed point numbers are represented by
1307  // integers, we do not want to follow any logic that assumes they should be
1308  // treated as integers.
1309  // TODO(leonardchan): When necessary, add another if statement checking for
1310  // conversions to fixed point types from other types.
1311  if (SrcType->isFixedPointType()) {
1312    if (DstType->isBooleanType())
1313      // It is important that we check this before checking if the dest type is
1314      // an integer because booleans are technically integer types.
1315      // We do not need to check the padding bit on unsigned types if unsigned
1316      // padding is enabled because overflow into this bit is undefined
1317      // behavior.
1318      return Builder.CreateIsNotNull(Src, "tobool");
1319    if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1320        DstType->isRealFloatingType())
1321      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1322
1323    llvm_unreachable(
1324        "Unhandled scalar conversion from a fixed point type to another type.");
1325  } else if (DstType->isFixedPointType()) {
1326    if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1327      // This also includes converting booleans and enums to fixed point types.
1328      return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1329
1330    llvm_unreachable(
1331        "Unhandled scalar conversion to a fixed point type from another type.");
1332  }
1333
1334  QualType NoncanonicalSrcType = SrcType;
1335  QualType NoncanonicalDstType = DstType;
1336
1337  SrcType = CGF.getContext().getCanonicalType(SrcType);
1338  DstType = CGF.getContext().getCanonicalType(DstType);
1339  if (SrcType == DstType) return Src;
1340
1341  if (DstType->isVoidType()) return nullptr;
1342
1343  llvm::Value *OrigSrc = Src;
1344  QualType OrigSrcType = SrcType;
1345  llvm::Type *SrcTy = Src->getType();
1346
1347  // Handle conversions to bool first, they are special: comparisons against 0.
1348  if (DstType->isBooleanType())
1349    return EmitConversionToBool(Src, SrcType);
1350
1351  llvm::Type *DstTy = ConvertType(DstType);
1352
1353  // Cast from half through float if half isn't a native type.
1354  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1355    // Cast to FP using the intrinsic if the half type itself isn't supported.
1356    if (DstTy->isFloatingPointTy()) {
1357      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1358        return Builder.CreateCall(
1359            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1360            Src);
1361    } else {
1362      // Cast to other types through float, using either the intrinsic or FPExt,
1363      // depending on whether the half type itself is supported
1364      // (as opposed to operations on half, available with NativeHalfType).
1365      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1366        Src = Builder.CreateCall(
1367            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1368                                 CGF.CGM.FloatTy),
1369            Src);
1370      } else {
1371        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1372      }
1373      SrcType = CGF.getContext().FloatTy;
1374      SrcTy = CGF.FloatTy;
1375    }
1376  }
1377
1378  // Ignore conversions like int -> uint.
1379  if (SrcTy == DstTy) {
1380    if (Opts.EmitImplicitIntegerSignChangeChecks)
1381      EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1382                                 NoncanonicalDstType, Loc);
1383
1384    return Src;
1385  }
1386
1387  // Handle pointer conversions next: pointers can only be converted to/from
1388  // other pointers and integers. Check for pointer types in terms of LLVM, as
1389  // some native types (like Obj-C id) may map to a pointer type.
1390  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1391    // The source value may be an integer, or a pointer.
1392    if (isa<llvm::PointerType>(SrcTy))
1393      return Builder.CreateBitCast(Src, DstTy, "conv");
1394
1395    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1396    // First, convert to the correct width so that we control the kind of
1397    // extension.
1398    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1399    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1400    llvm::Value* IntResult =
1401        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1402    // Then, cast to pointer.
1403    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1404  }
1405
1406  if (isa<llvm::PointerType>(SrcTy)) {
1407    // Must be an ptr to int cast.
1408    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1409    return Builder.CreatePtrToInt(Src, DstTy, "conv");
1410  }
1411
1412  // A scalar can be splatted to an extended vector of the same element type
1413  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1414    // Sema should add casts to make sure that the source expression's type is
1415    // the same as the vector's element type (sans qualifiers)
1416    assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1417               SrcType.getTypePtr() &&
1418           "Splatted expr doesn't match with vector element type?");
1419
1420    // Splat the element across to all elements
1421    unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1422    return Builder.CreateVectorSplat(NumElements, Src, "splat");
1423  }
1424
1425  if (SrcType->isMatrixType() && DstType->isMatrixType())
1426    return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1427
1428  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1429    // Allow bitcast from vector to integer/fp of the same size.
1430    llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1431    llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1432    if (SrcSize == DstSize)
1433      return Builder.CreateBitCast(Src, DstTy, "conv");
1434
1435    // Conversions between vectors of different sizes are not allowed except
1436    // when vectors of half are involved. Operations on storage-only half
1437    // vectors require promoting half vector operands to float vectors and
1438    // truncating the result, which is either an int or float vector, to a
1439    // short or half vector.
1440
1441    // Source and destination are both expected to be vectors.
1442    llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1443    llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1444    (void)DstElementTy;
1445
1446    assert(((SrcElementTy->isIntegerTy() &&
1447             DstElementTy->isIntegerTy()) ||
1448            (SrcElementTy->isFloatingPointTy() &&
1449             DstElementTy->isFloatingPointTy())) &&
1450           "unexpected conversion between a floating-point vector and an "
1451           "integer vector");
1452
1453    // Truncate an i32 vector to an i16 vector.
1454    if (SrcElementTy->isIntegerTy())
1455      return Builder.CreateIntCast(Src, DstTy, false, "conv");
1456
1457    // Truncate a float vector to a half vector.
1458    if (SrcSize > DstSize)
1459      return Builder.CreateFPTrunc(Src, DstTy, "conv");
1460
1461    // Promote a half vector to a float vector.
1462    return Builder.CreateFPExt(Src, DstTy, "conv");
1463  }
1464
1465  // Finally, we have the arithmetic types: real int/float.
1466  Value *Res = nullptr;
1467  llvm::Type *ResTy = DstTy;
1468
1469  // An overflowing conversion has undefined behavior if either the source type
1470  // or the destination type is a floating-point type. However, we consider the
1471  // range of representable values for all floating-point types to be
1472  // [-inf,+inf], so no overflow can ever happen when the destination type is a
1473  // floating-point type.
1474  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1475      OrigSrcType->isFloatingType())
1476    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1477                             Loc);
1478
1479  // Cast to half through float if half isn't a native type.
1480  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1481    // Make sure we cast in a single step if from another FP type.
1482    if (SrcTy->isFloatingPointTy()) {
1483      // Use the intrinsic if the half type itself isn't supported
1484      // (as opposed to operations on half, available with NativeHalfType).
1485      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1486        return Builder.CreateCall(
1487            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1488      // If the half type is supported, just use an fptrunc.
1489      return Builder.CreateFPTrunc(Src, DstTy);
1490    }
1491    DstTy = CGF.FloatTy;
1492  }
1493
1494  Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1495
1496  if (DstTy != ResTy) {
1497    if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1498      assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1499      Res = Builder.CreateCall(
1500        CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1501        Res);
1502    } else {
1503      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1504    }
1505  }
1506
1507  if (Opts.EmitImplicitIntegerTruncationChecks)
1508    EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1509                               NoncanonicalDstType, Loc);
1510
1511  if (Opts.EmitImplicitIntegerSignChangeChecks)
1512    EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1513                               NoncanonicalDstType, Loc);
1514
1515  return Res;
1516}
1517
1518Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1519                                                   QualType DstTy,
1520                                                   SourceLocation Loc) {
1521  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1522  llvm::Value *Result;
1523  if (SrcTy->isRealFloatingType())
1524    Result = FPBuilder.CreateFloatingToFixed(Src,
1525        CGF.getContext().getFixedPointSemantics(DstTy));
1526  else if (DstTy->isRealFloatingType())
1527    Result = FPBuilder.CreateFixedToFloating(Src,
1528        CGF.getContext().getFixedPointSemantics(SrcTy),
1529        ConvertType(DstTy));
1530  else {
1531    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1532    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1533
1534    if (DstTy->isIntegerType())
1535      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1536                                              DstFPSema.getWidth(),
1537                                              DstFPSema.isSigned());
1538    else if (SrcTy->isIntegerType())
1539      Result =  FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1540                                               DstFPSema);
1541    else
1542      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1543  }
1544  return Result;
1545}
1546
1547/// Emit a conversion from the specified complex type to the specified
1548/// destination type, where the destination type is an LLVM scalar type.
1549Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1550    CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1551    SourceLocation Loc) {
1552  // Get the source element type.
1553  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1554
1555  // Handle conversions to bool first, they are special: comparisons against 0.
1556  if (DstTy->isBooleanType()) {
1557    //  Complex != 0  -> (Real != 0) | (Imag != 0)
1558    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1559    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1560    return Builder.CreateOr(Src.first, Src.second, "tobool");
1561  }
1562
1563  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1564  // the imaginary part of the complex value is discarded and the value of the
1565  // real part is converted according to the conversion rules for the
1566  // corresponding real type.
1567  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1568}
1569
1570Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1571  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1572}
1573
1574/// Emit a sanitization check for the given "binary" operation (which
1575/// might actually be a unary increment which has been lowered to a binary
1576/// operation). The check passes if all values in \p Checks (which are \c i1),
1577/// are \c true.
1578void ScalarExprEmitter::EmitBinOpCheck(
1579    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1580  assert(CGF.IsSanitizerScope);
1581  SanitizerHandler Check;
1582  SmallVector<llvm::Constant *, 4> StaticData;
1583  SmallVector<llvm::Value *, 2> DynamicData;
1584
1585  BinaryOperatorKind Opcode = Info.Opcode;
1586  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1587    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1588
1589  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1590  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1591  if (UO && UO->getOpcode() == UO_Minus) {
1592    Check = SanitizerHandler::NegateOverflow;
1593    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1594    DynamicData.push_back(Info.RHS);
1595  } else {
1596    if (BinaryOperator::isShiftOp(Opcode)) {
1597      // Shift LHS negative or too large, or RHS out of bounds.
1598      Check = SanitizerHandler::ShiftOutOfBounds;
1599      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1600      StaticData.push_back(
1601        CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1602      StaticData.push_back(
1603        CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1604    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1605      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1606      Check = SanitizerHandler::DivremOverflow;
1607      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1608    } else {
1609      // Arithmetic overflow (+, -, *).
1610      switch (Opcode) {
1611      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1612      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1613      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1614      default: llvm_unreachable("unexpected opcode for bin op check");
1615      }
1616      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1617    }
1618    DynamicData.push_back(Info.LHS);
1619    DynamicData.push_back(Info.RHS);
1620  }
1621
1622  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1623}
1624
1625//===----------------------------------------------------------------------===//
1626//                            Visitor Methods
1627//===----------------------------------------------------------------------===//
1628
1629Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1630  CGF.ErrorUnsupported(E, "scalar expression");
1631  if (E->getType()->isVoidType())
1632    return nullptr;
1633  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1634}
1635
1636Value *
1637ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1638  ASTContext &Context = CGF.getContext();
1639  unsigned AddrSpace =
1640      Context.getTargetAddressSpace(CGF.CGM.GetGlobalConstantAddressSpace());
1641  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1642      E->ComputeName(Context), "__usn_str", AddrSpace);
1643
1644  llvm::Type *ExprTy = ConvertType(E->getType());
1645  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1646                                                     "usn_addr_cast");
1647}
1648
1649Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1650  // Vector Mask Case
1651  if (E->getNumSubExprs() == 2) {
1652    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1653    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1654    Value *Mask;
1655
1656    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1657    unsigned LHSElts = LTy->getNumElements();
1658
1659    Mask = RHS;
1660
1661    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1662
1663    // Mask off the high bits of each shuffle index.
1664    Value *MaskBits =
1665        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1666    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1667
1668    // newv = undef
1669    // mask = mask & maskbits
1670    // for each elt
1671    //   n = extract mask i
1672    //   x = extract val n
1673    //   newv = insert newv, x, i
1674    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1675                                           MTy->getNumElements());
1676    Value* NewV = llvm::PoisonValue::get(RTy);
1677    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1678      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1679      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1680
1681      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1682      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1683    }
1684    return NewV;
1685  }
1686
1687  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1688  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1689
1690  SmallVector<int, 32> Indices;
1691  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1692    llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1693    // Check for -1 and output it as undef in the IR.
1694    if (Idx.isSigned() && Idx.isAllOnes())
1695      Indices.push_back(-1);
1696    else
1697      Indices.push_back(Idx.getZExtValue());
1698  }
1699
1700  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1701}
1702
1703Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1704  QualType SrcType = E->getSrcExpr()->getType(),
1705           DstType = E->getType();
1706
1707  Value *Src  = CGF.EmitScalarExpr(E->getSrcExpr());
1708
1709  SrcType = CGF.getContext().getCanonicalType(SrcType);
1710  DstType = CGF.getContext().getCanonicalType(DstType);
1711  if (SrcType == DstType) return Src;
1712
1713  assert(SrcType->isVectorType() &&
1714         "ConvertVector source type must be a vector");
1715  assert(DstType->isVectorType() &&
1716         "ConvertVector destination type must be a vector");
1717
1718  llvm::Type *SrcTy = Src->getType();
1719  llvm::Type *DstTy = ConvertType(DstType);
1720
1721  // Ignore conversions like int -> uint.
1722  if (SrcTy == DstTy)
1723    return Src;
1724
1725  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1726           DstEltType = DstType->castAs<VectorType>()->getElementType();
1727
1728  assert(SrcTy->isVectorTy() &&
1729         "ConvertVector source IR type must be a vector");
1730  assert(DstTy->isVectorTy() &&
1731         "ConvertVector destination IR type must be a vector");
1732
1733  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1734             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1735
1736  if (DstEltType->isBooleanType()) {
1737    assert((SrcEltTy->isFloatingPointTy() ||
1738            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1739
1740    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1741    if (SrcEltTy->isFloatingPointTy()) {
1742      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1743    } else {
1744      return Builder.CreateICmpNE(Src, Zero, "tobool");
1745    }
1746  }
1747
1748  // We have the arithmetic types: real int/float.
1749  Value *Res = nullptr;
1750
1751  if (isa<llvm::IntegerType>(SrcEltTy)) {
1752    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1753    if (isa<llvm::IntegerType>(DstEltTy))
1754      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1755    else if (InputSigned)
1756      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1757    else
1758      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1759  } else if (isa<llvm::IntegerType>(DstEltTy)) {
1760    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1761    if (DstEltType->isSignedIntegerOrEnumerationType())
1762      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1763    else
1764      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1765  } else {
1766    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1767           "Unknown real conversion");
1768    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1769      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1770    else
1771      Res = Builder.CreateFPExt(Src, DstTy, "conv");
1772  }
1773
1774  return Res;
1775}
1776
1777Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1778  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1779    CGF.EmitIgnoredExpr(E->getBase());
1780    return CGF.emitScalarConstant(Constant, E);
1781  } else {
1782    Expr::EvalResult Result;
1783    if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1784      llvm::APSInt Value = Result.Val.getInt();
1785      CGF.EmitIgnoredExpr(E->getBase());
1786      return Builder.getInt(Value);
1787    }
1788  }
1789
1790  return EmitLoadOfLValue(E);
1791}
1792
1793Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1794  TestAndClearIgnoreResultAssign();
1795
1796  // Emit subscript expressions in rvalue context's.  For most cases, this just
1797  // loads the lvalue formed by the subscript expr.  However, we have to be
1798  // careful, because the base of a vector subscript is occasionally an rvalue,
1799  // so we can't get it as an lvalue.
1800  if (!E->getBase()->getType()->isVectorType() &&
1801      !E->getBase()->getType()->isSveVLSBuiltinType())
1802    return EmitLoadOfLValue(E);
1803
1804  // Handle the vector case.  The base must be a vector, the index must be an
1805  // integer value.
1806  Value *Base = Visit(E->getBase());
1807  Value *Idx  = Visit(E->getIdx());
1808  QualType IdxTy = E->getIdx()->getType();
1809
1810  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1811    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1812
1813  return Builder.CreateExtractElement(Base, Idx, "vecext");
1814}
1815
1816Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1817  TestAndClearIgnoreResultAssign();
1818
1819  // Handle the vector case.  The base must be a vector, the index must be an
1820  // integer value.
1821  Value *RowIdx = Visit(E->getRowIdx());
1822  Value *ColumnIdx = Visit(E->getColumnIdx());
1823
1824  const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1825  unsigned NumRows = MatrixTy->getNumRows();
1826  llvm::MatrixBuilder MB(Builder);
1827  Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1828  if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1829    MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1830
1831  Value *Matrix = Visit(E->getBase());
1832
1833  // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1834  return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1835}
1836
1837static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1838                      unsigned Off) {
1839  int MV = SVI->getMaskValue(Idx);
1840  if (MV == -1)
1841    return -1;
1842  return Off + MV;
1843}
1844
1845static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1846  assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1847         "Index operand too large for shufflevector mask!");
1848  return C->getZExtValue();
1849}
1850
1851Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1852  bool Ignore = TestAndClearIgnoreResultAssign();
1853  (void)Ignore;
1854  assert (Ignore == false && "init list ignored");
1855  unsigned NumInitElements = E->getNumInits();
1856
1857  if (E->hadArrayRangeDesignator())
1858    CGF.ErrorUnsupported(E, "GNU array range designator extension");
1859
1860  llvm::VectorType *VType =
1861    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1862
1863  if (!VType) {
1864    if (NumInitElements == 0) {
1865      // C++11 value-initialization for the scalar.
1866      return EmitNullValue(E->getType());
1867    }
1868    // We have a scalar in braces. Just use the first element.
1869    return Visit(E->getInit(0));
1870  }
1871
1872  if (isa<llvm::ScalableVectorType>(VType)) {
1873    if (NumInitElements == 0) {
1874      // C++11 value-initialization for the vector.
1875      return EmitNullValue(E->getType());
1876    }
1877
1878    if (NumInitElements == 1) {
1879      Expr *InitVector = E->getInit(0);
1880
1881      // Initialize from another scalable vector of the same type.
1882      if (InitVector->getType() == E->getType())
1883        return Visit(InitVector);
1884    }
1885
1886    llvm_unreachable("Unexpected initialization of a scalable vector!");
1887  }
1888
1889  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
1890
1891  // Loop over initializers collecting the Value for each, and remembering
1892  // whether the source was swizzle (ExtVectorElementExpr).  This will allow
1893  // us to fold the shuffle for the swizzle into the shuffle for the vector
1894  // initializer, since LLVM optimizers generally do not want to touch
1895  // shuffles.
1896  unsigned CurIdx = 0;
1897  bool VIsPoisonShuffle = false;
1898  llvm::Value *V = llvm::PoisonValue::get(VType);
1899  for (unsigned i = 0; i != NumInitElements; ++i) {
1900    Expr *IE = E->getInit(i);
1901    Value *Init = Visit(IE);
1902    SmallVector<int, 16> Args;
1903
1904    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1905
1906    // Handle scalar elements.  If the scalar initializer is actually one
1907    // element of a different vector of the same width, use shuffle instead of
1908    // extract+insert.
1909    if (!VVT) {
1910      if (isa<ExtVectorElementExpr>(IE)) {
1911        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1912
1913        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
1914                ->getNumElements() == ResElts) {
1915          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1916          Value *LHS = nullptr, *RHS = nullptr;
1917          if (CurIdx == 0) {
1918            // insert into poison -> shuffle (src, poison)
1919            // shufflemask must use an i32
1920            Args.push_back(getAsInt32(C, CGF.Int32Ty));
1921            Args.resize(ResElts, -1);
1922
1923            LHS = EI->getVectorOperand();
1924            RHS = V;
1925            VIsPoisonShuffle = true;
1926          } else if (VIsPoisonShuffle) {
1927            // insert into poison shuffle && size match -> shuffle (v, src)
1928            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1929            for (unsigned j = 0; j != CurIdx; ++j)
1930              Args.push_back(getMaskElt(SVV, j, 0));
1931            Args.push_back(ResElts + C->getZExtValue());
1932            Args.resize(ResElts, -1);
1933
1934            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1935            RHS = EI->getVectorOperand();
1936            VIsPoisonShuffle = false;
1937          }
1938          if (!Args.empty()) {
1939            V = Builder.CreateShuffleVector(LHS, RHS, Args);
1940            ++CurIdx;
1941            continue;
1942          }
1943        }
1944      }
1945      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1946                                      "vecinit");
1947      VIsPoisonShuffle = false;
1948      ++CurIdx;
1949      continue;
1950    }
1951
1952    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
1953
1954    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1955    // input is the same width as the vector being constructed, generate an
1956    // optimized shuffle of the swizzle input into the result.
1957    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1958    if (isa<ExtVectorElementExpr>(IE)) {
1959      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1960      Value *SVOp = SVI->getOperand(0);
1961      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
1962
1963      if (OpTy->getNumElements() == ResElts) {
1964        for (unsigned j = 0; j != CurIdx; ++j) {
1965          // If the current vector initializer is a shuffle with poison, merge
1966          // this shuffle directly into it.
1967          if (VIsPoisonShuffle) {
1968            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
1969          } else {
1970            Args.push_back(j);
1971          }
1972        }
1973        for (unsigned j = 0, je = InitElts; j != je; ++j)
1974          Args.push_back(getMaskElt(SVI, j, Offset));
1975        Args.resize(ResElts, -1);
1976
1977        if (VIsPoisonShuffle)
1978          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1979
1980        Init = SVOp;
1981      }
1982    }
1983
1984    // Extend init to result vector length, and then shuffle its contribution
1985    // to the vector initializer into V.
1986    if (Args.empty()) {
1987      for (unsigned j = 0; j != InitElts; ++j)
1988        Args.push_back(j);
1989      Args.resize(ResElts, -1);
1990      Init = Builder.CreateShuffleVector(Init, Args, "vext");
1991
1992      Args.clear();
1993      for (unsigned j = 0; j != CurIdx; ++j)
1994        Args.push_back(j);
1995      for (unsigned j = 0; j != InitElts; ++j)
1996        Args.push_back(j + Offset);
1997      Args.resize(ResElts, -1);
1998    }
1999
2000    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2001    // merging subsequent shuffles into this one.
2002    if (CurIdx == 0)
2003      std::swap(V, Init);
2004    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2005    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2006    CurIdx += InitElts;
2007  }
2008
2009  // FIXME: evaluate codegen vs. shuffling against constant null vector.
2010  // Emit remaining default initializers.
2011  llvm::Type *EltTy = VType->getElementType();
2012
2013  // Emit remaining default initializers
2014  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2015    Value *Idx = Builder.getInt32(CurIdx);
2016    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2017    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2018  }
2019  return V;
2020}
2021
2022bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
2023  const Expr *E = CE->getSubExpr();
2024
2025  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2026    return false;
2027
2028  if (isa<CXXThisExpr>(E->IgnoreParens())) {
2029    // We always assume that 'this' is never null.
2030    return false;
2031  }
2032
2033  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2034    // And that glvalue casts are never null.
2035    if (ICE->isGLValue())
2036      return false;
2037  }
2038
2039  return true;
2040}
2041
2042// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
2043// have to handle a more broad range of conversions than explicit casts, as they
2044// handle things like function to ptr-to-function decay etc.
2045Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2046  Expr *E = CE->getSubExpr();
2047  QualType DestTy = CE->getType();
2048  CastKind Kind = CE->getCastKind();
2049  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2050
2051  // These cases are generally not written to ignore the result of
2052  // evaluating their sub-expressions, so we clear this now.
2053  bool Ignored = TestAndClearIgnoreResultAssign();
2054
2055  // Since almost all cast kinds apply to scalars, this switch doesn't have
2056  // a default case, so the compiler will warn on a missing case.  The cases
2057  // are in the same order as in the CastKind enum.
2058  switch (Kind) {
2059  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2060  case CK_BuiltinFnToFnPtr:
2061    llvm_unreachable("builtin functions are handled elsewhere");
2062
2063  case CK_LValueBitCast:
2064  case CK_ObjCObjectLValueCast: {
2065    Address Addr = EmitLValue(E).getAddress(CGF);
2066    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2067    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2068    return EmitLoadOfLValue(LV, CE->getExprLoc());
2069  }
2070
2071  case CK_LValueToRValueBitCast: {
2072    LValue SourceLVal = CGF.EmitLValue(E);
2073    Address Addr = SourceLVal.getAddress(CGF).withElementType(
2074        CGF.ConvertTypeForMem(DestTy));
2075    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2076    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2077    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2078  }
2079
2080  case CK_CPointerToObjCPointerCast:
2081  case CK_BlockPointerToObjCPointerCast:
2082  case CK_AnyPointerToBlockPointerCast:
2083  case CK_BitCast: {
2084    Value *Src = Visit(const_cast<Expr*>(E));
2085    llvm::Type *SrcTy = Src->getType();
2086    llvm::Type *DstTy = ConvertType(DestTy);
2087    assert(
2088        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2089         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2090        "Address-space cast must be used to convert address spaces");
2091
2092    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2093      if (auto *PT = DestTy->getAs<PointerType>()) {
2094        CGF.EmitVTablePtrCheckForCast(
2095            PT->getPointeeType(),
2096            Address(Src,
2097                    CGF.ConvertTypeForMem(
2098                        E->getType()->castAs<PointerType>()->getPointeeType()),
2099                    CGF.getPointerAlign()),
2100            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2101            CE->getBeginLoc());
2102      }
2103    }
2104
2105    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2106      const QualType SrcType = E->getType();
2107
2108      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2109        // Casting to pointer that could carry dynamic information (provided by
2110        // invariant.group) requires launder.
2111        Src = Builder.CreateLaunderInvariantGroup(Src);
2112      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2113        // Casting to pointer that does not carry dynamic information (provided
2114        // by invariant.group) requires stripping it.  Note that we don't do it
2115        // if the source could not be dynamic type and destination could be
2116        // dynamic because dynamic information is already laundered.  It is
2117        // because launder(strip(src)) == launder(src), so there is no need to
2118        // add extra strip before launder.
2119        Src = Builder.CreateStripInvariantGroup(Src);
2120      }
2121    }
2122
2123    // Update heapallocsite metadata when there is an explicit pointer cast.
2124    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2125      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2126          !isa<CastExpr>(E)) {
2127        QualType PointeeType = DestTy->getPointeeType();
2128        if (!PointeeType.isNull())
2129          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2130                                                       CE->getExprLoc());
2131      }
2132    }
2133
2134    // If Src is a fixed vector and Dst is a scalable vector, and both have the
2135    // same element type, use the llvm.vector.insert intrinsic to perform the
2136    // bitcast.
2137    if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2138      if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2139        // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
2140        // vector, use a vector insert and bitcast the result.
2141        bool NeedsBitCast = false;
2142        auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2143        llvm::Type *OrigType = DstTy;
2144        if (ScalableDst == PredType &&
2145            FixedSrc->getElementType() == Builder.getInt8Ty()) {
2146          DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2147          ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
2148          NeedsBitCast = true;
2149        }
2150        if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
2151          llvm::Value *UndefVec = llvm::UndefValue::get(DstTy);
2152          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2153          llvm::Value *Result = Builder.CreateInsertVector(
2154              DstTy, UndefVec, Src, Zero, "cast.scalable");
2155          if (NeedsBitCast)
2156            Result = Builder.CreateBitCast(Result, OrigType);
2157          return Result;
2158        }
2159      }
2160    }
2161
2162    // If Src is a scalable vector and Dst is a fixed vector, and both have the
2163    // same element type, use the llvm.vector.extract intrinsic to perform the
2164    // bitcast.
2165    if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2166      if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2167        // If we are casting a scalable 16 x i1 predicate vector to a fixed i8
2168        // vector, bitcast the source and use a vector extract.
2169        auto PredType = llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
2170        if (ScalableSrc == PredType &&
2171            FixedDst->getElementType() == Builder.getInt8Ty()) {
2172          SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
2173          ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
2174          Src = Builder.CreateBitCast(Src, SrcTy);
2175        }
2176        if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
2177          llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2178          return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
2179        }
2180      }
2181    }
2182
2183    // Perform VLAT <-> VLST bitcast through memory.
2184    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2185    //       require the element types of the vectors to be the same, we
2186    //       need to keep this around for bitcasts between VLAT <-> VLST where
2187    //       the element types of the vectors are not the same, until we figure
2188    //       out a better way of doing these casts.
2189    if ((isa<llvm::FixedVectorType>(SrcTy) &&
2190         isa<llvm::ScalableVectorType>(DstTy)) ||
2191        (isa<llvm::ScalableVectorType>(SrcTy) &&
2192         isa<llvm::FixedVectorType>(DstTy))) {
2193      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2194      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2195      CGF.EmitStoreOfScalar(Src, LV);
2196      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2197      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2198      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2199      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2200    }
2201    return Builder.CreateBitCast(Src, DstTy);
2202  }
2203  case CK_AddressSpaceConversion: {
2204    Expr::EvalResult Result;
2205    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2206        Result.Val.isNullPointer()) {
2207      // If E has side effect, it is emitted even if its final result is a
2208      // null pointer. In that case, a DCE pass should be able to
2209      // eliminate the useless instructions emitted during translating E.
2210      if (Result.HasSideEffects)
2211        Visit(E);
2212      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2213          ConvertType(DestTy)), DestTy);
2214    }
2215    // Since target may map different address spaces in AST to the same address
2216    // space, an address space conversion may end up as a bitcast.
2217    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2218        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2219        DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2220  }
2221  case CK_AtomicToNonAtomic:
2222  case CK_NonAtomicToAtomic:
2223  case CK_UserDefinedConversion:
2224    return Visit(const_cast<Expr*>(E));
2225
2226  case CK_NoOp: {
2227    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2228                                              : Visit(const_cast<Expr *>(E));
2229  }
2230
2231  case CK_BaseToDerived: {
2232    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2233    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2234
2235    Address Base = CGF.EmitPointerWithAlignment(E);
2236    Address Derived =
2237      CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2238                                   CE->path_begin(), CE->path_end(),
2239                                   CGF.ShouldNullCheckClassCastValue(CE));
2240
2241    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2242    // performed and the object is not of the derived type.
2243    if (CGF.sanitizePerformTypeCheck())
2244      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2245                        Derived.getPointer(), DestTy->getPointeeType());
2246
2247    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2248      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2249                                    /*MayBeNull=*/true,
2250                                    CodeGenFunction::CFITCK_DerivedCast,
2251                                    CE->getBeginLoc());
2252
2253    return Derived.getPointer();
2254  }
2255  case CK_UncheckedDerivedToBase:
2256  case CK_DerivedToBase: {
2257    // The EmitPointerWithAlignment path does this fine; just discard
2258    // the alignment.
2259    return CGF.EmitPointerWithAlignment(CE).getPointer();
2260  }
2261
2262  case CK_Dynamic: {
2263    Address V = CGF.EmitPointerWithAlignment(E);
2264    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2265    return CGF.EmitDynamicCast(V, DCE);
2266  }
2267
2268  case CK_ArrayToPointerDecay:
2269    return CGF.EmitArrayToPointerDecay(E).getPointer();
2270  case CK_FunctionToPointerDecay:
2271    return EmitLValue(E).getPointer(CGF);
2272
2273  case CK_NullToPointer:
2274    if (MustVisitNullValue(E))
2275      CGF.EmitIgnoredExpr(E);
2276
2277    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2278                              DestTy);
2279
2280  case CK_NullToMemberPointer: {
2281    if (MustVisitNullValue(E))
2282      CGF.EmitIgnoredExpr(E);
2283
2284    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2285    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2286  }
2287
2288  case CK_ReinterpretMemberPointer:
2289  case CK_BaseToDerivedMemberPointer:
2290  case CK_DerivedToBaseMemberPointer: {
2291    Value *Src = Visit(E);
2292
2293    // Note that the AST doesn't distinguish between checked and
2294    // unchecked member pointer conversions, so we always have to
2295    // implement checked conversions here.  This is inefficient when
2296    // actual control flow may be required in order to perform the
2297    // check, which it is for data member pointers (but not member
2298    // function pointers on Itanium and ARM).
2299    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2300  }
2301
2302  case CK_ARCProduceObject:
2303    return CGF.EmitARCRetainScalarExpr(E);
2304  case CK_ARCConsumeObject:
2305    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2306  case CK_ARCReclaimReturnedObject:
2307    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2308  case CK_ARCExtendBlockObject:
2309    return CGF.EmitARCExtendBlockObject(E);
2310
2311  case CK_CopyAndAutoreleaseBlockObject:
2312    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2313
2314  case CK_FloatingRealToComplex:
2315  case CK_FloatingComplexCast:
2316  case CK_IntegralRealToComplex:
2317  case CK_IntegralComplexCast:
2318  case CK_IntegralComplexToFloatingComplex:
2319  case CK_FloatingComplexToIntegralComplex:
2320  case CK_ConstructorConversion:
2321  case CK_ToUnion:
2322    llvm_unreachable("scalar cast to non-scalar value");
2323
2324  case CK_LValueToRValue:
2325    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2326    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2327    return Visit(const_cast<Expr*>(E));
2328
2329  case CK_IntegralToPointer: {
2330    Value *Src = Visit(const_cast<Expr*>(E));
2331
2332    // First, convert to the correct width so that we control the kind of
2333    // extension.
2334    auto DestLLVMTy = ConvertType(DestTy);
2335    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2336    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2337    llvm::Value* IntResult =
2338      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2339
2340    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2341
2342    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2343      // Going from integer to pointer that could be dynamic requires reloading
2344      // dynamic information from invariant.group.
2345      if (DestTy.mayBeDynamicClass())
2346        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2347    }
2348    return IntToPtr;
2349  }
2350  case CK_PointerToIntegral: {
2351    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2352    auto *PtrExpr = Visit(E);
2353
2354    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2355      const QualType SrcType = E->getType();
2356
2357      // Casting to integer requires stripping dynamic information as it does
2358      // not carries it.
2359      if (SrcType.mayBeDynamicClass())
2360        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2361    }
2362
2363    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2364  }
2365  case CK_ToVoid: {
2366    CGF.EmitIgnoredExpr(E);
2367    return nullptr;
2368  }
2369  case CK_MatrixCast: {
2370    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2371                                CE->getExprLoc());
2372  }
2373  case CK_VectorSplat: {
2374    llvm::Type *DstTy = ConvertType(DestTy);
2375    Value *Elt = Visit(const_cast<Expr *>(E));
2376    // Splat the element across to all elements
2377    llvm::ElementCount NumElements =
2378        cast<llvm::VectorType>(DstTy)->getElementCount();
2379    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2380  }
2381
2382  case CK_FixedPointCast:
2383    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2384                                CE->getExprLoc());
2385
2386  case CK_FixedPointToBoolean:
2387    assert(E->getType()->isFixedPointType() &&
2388           "Expected src type to be fixed point type");
2389    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2390    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2391                                CE->getExprLoc());
2392
2393  case CK_FixedPointToIntegral:
2394    assert(E->getType()->isFixedPointType() &&
2395           "Expected src type to be fixed point type");
2396    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2397    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2398                                CE->getExprLoc());
2399
2400  case CK_IntegralToFixedPoint:
2401    assert(E->getType()->isIntegerType() &&
2402           "Expected src type to be an integer");
2403    assert(DestTy->isFixedPointType() &&
2404           "Expected dest type to be fixed point type");
2405    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2406                                CE->getExprLoc());
2407
2408  case CK_IntegralCast: {
2409    ScalarConversionOpts Opts;
2410    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2411      if (!ICE->isPartOfExplicitCast())
2412        Opts = ScalarConversionOpts(CGF.SanOpts);
2413    }
2414    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2415                                CE->getExprLoc(), Opts);
2416  }
2417  case CK_IntegralToFloating:
2418  case CK_FloatingToIntegral:
2419  case CK_FloatingCast:
2420  case CK_FixedPointToFloating:
2421  case CK_FloatingToFixedPoint: {
2422    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2423    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2424                                CE->getExprLoc());
2425  }
2426  case CK_BooleanToSignedIntegral: {
2427    ScalarConversionOpts Opts;
2428    Opts.TreatBooleanAsSigned = true;
2429    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2430                                CE->getExprLoc(), Opts);
2431  }
2432  case CK_IntegralToBoolean:
2433    return EmitIntToBoolConversion(Visit(E));
2434  case CK_PointerToBoolean:
2435    return EmitPointerToBoolConversion(Visit(E), E->getType());
2436  case CK_FloatingToBoolean: {
2437    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2438    return EmitFloatToBoolConversion(Visit(E));
2439  }
2440  case CK_MemberPointerToBoolean: {
2441    llvm::Value *MemPtr = Visit(E);
2442    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2443    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2444  }
2445
2446  case CK_FloatingComplexToReal:
2447  case CK_IntegralComplexToReal:
2448    return CGF.EmitComplexExpr(E, false, true).first;
2449
2450  case CK_FloatingComplexToBoolean:
2451  case CK_IntegralComplexToBoolean: {
2452    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2453
2454    // TODO: kill this function off, inline appropriate case here
2455    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2456                                         CE->getExprLoc());
2457  }
2458
2459  case CK_ZeroToOCLOpaqueType: {
2460    assert((DestTy->isEventT() || DestTy->isQueueT() ||
2461            DestTy->isOCLIntelSubgroupAVCType()) &&
2462           "CK_ZeroToOCLEvent cast on non-event type");
2463    return llvm::Constant::getNullValue(ConvertType(DestTy));
2464  }
2465
2466  case CK_IntToOCLSampler:
2467    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2468
2469  } // end of switch
2470
2471  llvm_unreachable("unknown scalar cast");
2472}
2473
2474Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2475  CodeGenFunction::StmtExprEvaluation eval(CGF);
2476  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2477                                           !E->getType()->isVoidType());
2478  if (!RetAlloca.isValid())
2479    return nullptr;
2480  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2481                              E->getExprLoc());
2482}
2483
2484Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2485  CodeGenFunction::RunCleanupsScope Scope(CGF);
2486  Value *V = Visit(E->getSubExpr());
2487  // Defend against dominance problems caused by jumps out of expression
2488  // evaluation through the shared cleanup block.
2489  Scope.ForceCleanup({&V});
2490  return V;
2491}
2492
2493//===----------------------------------------------------------------------===//
2494//                             Unary Operators
2495//===----------------------------------------------------------------------===//
2496
2497static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2498                                           llvm::Value *InVal, bool IsInc,
2499                                           FPOptions FPFeatures) {
2500  BinOpInfo BinOp;
2501  BinOp.LHS = InVal;
2502  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2503  BinOp.Ty = E->getType();
2504  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2505  BinOp.FPFeatures = FPFeatures;
2506  BinOp.E = E;
2507  return BinOp;
2508}
2509
2510llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2511    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2512  llvm::Value *Amount =
2513      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2514  StringRef Name = IsInc ? "inc" : "dec";
2515  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2516  case LangOptions::SOB_Defined:
2517    return Builder.CreateAdd(InVal, Amount, Name);
2518  case LangOptions::SOB_Undefined:
2519    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2520      return Builder.CreateNSWAdd(InVal, Amount, Name);
2521    [[fallthrough]];
2522  case LangOptions::SOB_Trapping:
2523    if (!E->canOverflow())
2524      return Builder.CreateNSWAdd(InVal, Amount, Name);
2525    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2526        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2527  }
2528  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2529}
2530
2531namespace {
2532/// Handles check and update for lastprivate conditional variables.
2533class OMPLastprivateConditionalUpdateRAII {
2534private:
2535  CodeGenFunction &CGF;
2536  const UnaryOperator *E;
2537
2538public:
2539  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2540                                      const UnaryOperator *E)
2541      : CGF(CGF), E(E) {}
2542  ~OMPLastprivateConditionalUpdateRAII() {
2543    if (CGF.getLangOpts().OpenMP)
2544      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
2545          CGF, E->getSubExpr());
2546  }
2547};
2548} // namespace
2549
2550llvm::Value *
2551ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2552                                           bool isInc, bool isPre) {
2553  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2554  QualType type = E->getSubExpr()->getType();
2555  llvm::PHINode *atomicPHI = nullptr;
2556  llvm::Value *value;
2557  llvm::Value *input;
2558
2559  int amount = (isInc ? 1 : -1);
2560  bool isSubtraction = !isInc;
2561
2562  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2563    type = atomicTy->getValueType();
2564    if (isInc && type->isBooleanType()) {
2565      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2566      if (isPre) {
2567        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2568            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2569        return Builder.getTrue();
2570      }
2571      // For atomic bool increment, we just store true and return it for
2572      // preincrement, do an atomic swap with true for postincrement
2573      return Builder.CreateAtomicRMW(
2574          llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
2575          llvm::AtomicOrdering::SequentiallyConsistent);
2576    }
2577    // Special case for atomic increment / decrement on integers, emit
2578    // atomicrmw instructions.  We skip this if we want to be doing overflow
2579    // checking, and fall into the slow path with the atomic cmpxchg loop.
2580    if (!type->isBooleanType() && type->isIntegerType() &&
2581        !(type->isUnsignedIntegerType() &&
2582          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2583        CGF.getLangOpts().getSignedOverflowBehavior() !=
2584            LangOptions::SOB_Trapping) {
2585      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2586        llvm::AtomicRMWInst::Sub;
2587      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2588        llvm::Instruction::Sub;
2589      llvm::Value *amt = CGF.EmitToMemory(
2590          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2591      llvm::Value *old =
2592          Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
2593                                  llvm::AtomicOrdering::SequentiallyConsistent);
2594      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2595    }
2596    value = EmitLoadOfLValue(LV, E->getExprLoc());
2597    input = value;
2598    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2599    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2600    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2601    value = CGF.EmitToMemory(value, type);
2602    Builder.CreateBr(opBB);
2603    Builder.SetInsertPoint(opBB);
2604    atomicPHI = Builder.CreatePHI(value->getType(), 2);
2605    atomicPHI->addIncoming(value, startBB);
2606    value = atomicPHI;
2607  } else {
2608    value = EmitLoadOfLValue(LV, E->getExprLoc());
2609    input = value;
2610  }
2611
2612  // Special case of integer increment that we have to check first: bool++.
2613  // Due to promotion rules, we get:
2614  //   bool++ -> bool = bool + 1
2615  //          -> bool = (int)bool + 1
2616  //          -> bool = ((int)bool + 1 != 0)
2617  // An interesting aspect of this is that increment is always true.
2618  // Decrement does not have this property.
2619  if (isInc && type->isBooleanType()) {
2620    value = Builder.getTrue();
2621
2622  // Most common case by far: integer increment.
2623  } else if (type->isIntegerType()) {
2624    QualType promotedType;
2625    bool canPerformLossyDemotionCheck = false;
2626    if (CGF.getContext().isPromotableIntegerType(type)) {
2627      promotedType = CGF.getContext().getPromotedIntegerType(type);
2628      assert(promotedType != type && "Shouldn't promote to the same type.");
2629      canPerformLossyDemotionCheck = true;
2630      canPerformLossyDemotionCheck &=
2631          CGF.getContext().getCanonicalType(type) !=
2632          CGF.getContext().getCanonicalType(promotedType);
2633      canPerformLossyDemotionCheck &=
2634          PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
2635              type, promotedType);
2636      assert((!canPerformLossyDemotionCheck ||
2637              type->isSignedIntegerOrEnumerationType() ||
2638              promotedType->isSignedIntegerOrEnumerationType() ||
2639              ConvertType(type)->getScalarSizeInBits() ==
2640                  ConvertType(promotedType)->getScalarSizeInBits()) &&
2641             "The following check expects that if we do promotion to different "
2642             "underlying canonical type, at least one of the types (either "
2643             "base or promoted) will be signed, or the bitwidths will match.");
2644    }
2645    if (CGF.SanOpts.hasOneOf(
2646            SanitizerKind::ImplicitIntegerArithmeticValueChange) &&
2647        canPerformLossyDemotionCheck) {
2648      // While `x += 1` (for `x` with width less than int) is modeled as
2649      // promotion+arithmetics+demotion, and we can catch lossy demotion with
2650      // ease; inc/dec with width less than int can't overflow because of
2651      // promotion rules, so we omit promotion+demotion, which means that we can
2652      // not catch lossy "demotion". Because we still want to catch these cases
2653      // when the sanitizer is enabled, we perform the promotion, then perform
2654      // the increment/decrement in the wider type, and finally
2655      // perform the demotion. This will catch lossy demotions.
2656
2657      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2658      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2659      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2660      // Do pass non-default ScalarConversionOpts so that sanitizer check is
2661      // emitted.
2662      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2663                                   ScalarConversionOpts(CGF.SanOpts));
2664
2665      // Note that signed integer inc/dec with width less than int can't
2666      // overflow because of promotion rules; we're just eliding a few steps
2667      // here.
2668    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2669      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2670    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2671               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2672      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2673          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2674    } else {
2675      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2676      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2677    }
2678
2679  // Next most common: pointer increment.
2680  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2681    QualType type = ptr->getPointeeType();
2682
2683    // VLA types don't have constant size.
2684    if (const VariableArrayType *vla
2685          = CGF.getContext().getAsVariableArrayType(type)) {
2686      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2687      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2688      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
2689      if (CGF.getLangOpts().isSignedOverflowDefined())
2690        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
2691      else
2692        value = CGF.EmitCheckedInBoundsGEP(
2693            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
2694            E->getExprLoc(), "vla.inc");
2695
2696    // Arithmetic on function pointers (!) is just +-1.
2697    } else if (type->isFunctionType()) {
2698      llvm::Value *amt = Builder.getInt32(amount);
2699
2700      if (CGF.getLangOpts().isSignedOverflowDefined())
2701        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2702      else
2703        value =
2704            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
2705                                       /*SignedIndices=*/false, isSubtraction,
2706                                       E->getExprLoc(), "incdec.funcptr");
2707
2708    // For everything else, we can just do a simple increment.
2709    } else {
2710      llvm::Value *amt = Builder.getInt32(amount);
2711      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2712      if (CGF.getLangOpts().isSignedOverflowDefined())
2713        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
2714      else
2715        value = CGF.EmitCheckedInBoundsGEP(
2716            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
2717            E->getExprLoc(), "incdec.ptr");
2718    }
2719
2720  // Vector increment/decrement.
2721  } else if (type->isVectorType()) {
2722    if (type->hasIntegerRepresentation()) {
2723      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2724
2725      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2726    } else {
2727      value = Builder.CreateFAdd(
2728                  value,
2729                  llvm::ConstantFP::get(value->getType(), amount),
2730                  isInc ? "inc" : "dec");
2731    }
2732
2733  // Floating point.
2734  } else if (type->isRealFloatingType()) {
2735    // Add the inc/dec to the real part.
2736    llvm::Value *amt;
2737    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2738
2739    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2740      // Another special case: half FP increment should be done via float
2741      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2742        value = Builder.CreateCall(
2743            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2744                                 CGF.CGM.FloatTy),
2745            input, "incdec.conv");
2746      } else {
2747        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2748      }
2749    }
2750
2751    if (value->getType()->isFloatTy())
2752      amt = llvm::ConstantFP::get(VMContext,
2753                                  llvm::APFloat(static_cast<float>(amount)));
2754    else if (value->getType()->isDoubleTy())
2755      amt = llvm::ConstantFP::get(VMContext,
2756                                  llvm::APFloat(static_cast<double>(amount)));
2757    else {
2758      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
2759      // Convert from float.
2760      llvm::APFloat F(static_cast<float>(amount));
2761      bool ignored;
2762      const llvm::fltSemantics *FS;
2763      // Don't use getFloatTypeSemantics because Half isn't
2764      // necessarily represented using the "half" LLVM type.
2765      if (value->getType()->isFP128Ty())
2766        FS = &CGF.getTarget().getFloat128Format();
2767      else if (value->getType()->isHalfTy())
2768        FS = &CGF.getTarget().getHalfFormat();
2769      else if (value->getType()->isBFloatTy())
2770        FS = &CGF.getTarget().getBFloat16Format();
2771      else if (value->getType()->isPPC_FP128Ty())
2772        FS = &CGF.getTarget().getIbm128Format();
2773      else
2774        FS = &CGF.getTarget().getLongDoubleFormat();
2775      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2776      amt = llvm::ConstantFP::get(VMContext, F);
2777    }
2778    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2779
2780    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2781      if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2782        value = Builder.CreateCall(
2783            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2784                                 CGF.CGM.FloatTy),
2785            value, "incdec.conv");
2786      } else {
2787        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2788      }
2789    }
2790
2791  // Fixed-point types.
2792  } else if (type->isFixedPointType()) {
2793    // Fixed-point types are tricky. In some cases, it isn't possible to
2794    // represent a 1 or a -1 in the type at all. Piggyback off of
2795    // EmitFixedPointBinOp to avoid having to reimplement saturation.
2796    BinOpInfo Info;
2797    Info.E = E;
2798    Info.Ty = E->getType();
2799    Info.Opcode = isInc ? BO_Add : BO_Sub;
2800    Info.LHS = value;
2801    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
2802    // If the type is signed, it's better to represent this as +(-1) or -(-1),
2803    // since -1 is guaranteed to be representable.
2804    if (type->isSignedFixedPointType()) {
2805      Info.Opcode = isInc ? BO_Sub : BO_Add;
2806      Info.RHS = Builder.CreateNeg(Info.RHS);
2807    }
2808    // Now, convert from our invented integer literal to the type of the unary
2809    // op. This will upscale and saturate if necessary. This value can become
2810    // undef in some cases.
2811    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
2812    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
2813    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
2814    value = EmitFixedPointBinOp(Info);
2815
2816  // Objective-C pointer types.
2817  } else {
2818    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2819
2820    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2821    if (!isInc) size = -size;
2822    llvm::Value *sizeValue =
2823      llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2824
2825    if (CGF.getLangOpts().isSignedOverflowDefined())
2826      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
2827    else
2828      value = CGF.EmitCheckedInBoundsGEP(
2829          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
2830          E->getExprLoc(), "incdec.objptr");
2831    value = Builder.CreateBitCast(value, input->getType());
2832  }
2833
2834  if (atomicPHI) {
2835    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2836    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2837    auto Pair = CGF.EmitAtomicCompareExchange(
2838        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2839    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2840    llvm::Value *success = Pair.second;
2841    atomicPHI->addIncoming(old, curBlock);
2842    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2843    Builder.SetInsertPoint(contBB);
2844    return isPre ? value : input;
2845  }
2846
2847  // Store the updated result through the lvalue.
2848  if (LV.isBitField())
2849    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2850  else
2851    CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2852
2853  // If this is a postinc, return the value read from memory, otherwise use the
2854  // updated value.
2855  return isPre ? value : input;
2856}
2857
2858
2859Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
2860                                         QualType PromotionType) {
2861  QualType promotionTy = PromotionType.isNull()
2862                             ? getPromotionType(E->getSubExpr()->getType())
2863                             : PromotionType;
2864  Value *result = VisitPlus(E, promotionTy);
2865  if (result && !promotionTy.isNull())
2866    result = EmitUnPromotedValue(result, E->getType());
2867  return result;
2868}
2869
2870Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
2871                                    QualType PromotionType) {
2872  // This differs from gcc, though, most likely due to a bug in gcc.
2873  TestAndClearIgnoreResultAssign();
2874  if (!PromotionType.isNull())
2875    return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
2876  return Visit(E->getSubExpr());
2877}
2878
2879Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
2880                                          QualType PromotionType) {
2881  QualType promotionTy = PromotionType.isNull()
2882                             ? getPromotionType(E->getSubExpr()->getType())
2883                             : PromotionType;
2884  Value *result = VisitMinus(E, promotionTy);
2885  if (result && !promotionTy.isNull())
2886    result = EmitUnPromotedValue(result, E->getType());
2887  return result;
2888}
2889
2890Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
2891                                     QualType PromotionType) {
2892  TestAndClearIgnoreResultAssign();
2893  Value *Op;
2894  if (!PromotionType.isNull())
2895    Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
2896  else
2897    Op = Visit(E->getSubExpr());
2898
2899  // Generate a unary FNeg for FP ops.
2900  if (Op->getType()->isFPOrFPVectorTy())
2901    return Builder.CreateFNeg(Op, "fneg");
2902
2903  // Emit unary minus with EmitSub so we handle overflow cases etc.
2904  BinOpInfo BinOp;
2905  BinOp.RHS = Op;
2906  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2907  BinOp.Ty = E->getType();
2908  BinOp.Opcode = BO_Sub;
2909  BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
2910  BinOp.E = E;
2911  return EmitSub(BinOp);
2912}
2913
2914Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2915  TestAndClearIgnoreResultAssign();
2916  Value *Op = Visit(E->getSubExpr());
2917  return Builder.CreateNot(Op, "not");
2918}
2919
2920Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2921  // Perform vector logical not on comparison with zero vector.
2922  if (E->getType()->isVectorType() &&
2923      E->getType()->castAs<VectorType>()->getVectorKind() ==
2924          VectorKind::Generic) {
2925    Value *Oper = Visit(E->getSubExpr());
2926    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2927    Value *Result;
2928    if (Oper->getType()->isFPOrFPVectorTy()) {
2929      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
2930          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
2931      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2932    } else
2933      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2934    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2935  }
2936
2937  // Compare operand to zero.
2938  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2939
2940  // Invert value.
2941  // TODO: Could dynamically modify easy computations here.  For example, if
2942  // the operand is an icmp ne, turn into icmp eq.
2943  BoolVal = Builder.CreateNot(BoolVal, "lnot");
2944
2945  // ZExt result to the expr type.
2946  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2947}
2948
2949Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2950  // Try folding the offsetof to a constant.
2951  Expr::EvalResult EVResult;
2952  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2953    llvm::APSInt Value = EVResult.Val.getInt();
2954    return Builder.getInt(Value);
2955  }
2956
2957  // Loop over the components of the offsetof to compute the value.
2958  unsigned n = E->getNumComponents();
2959  llvm::Type* ResultType = ConvertType(E->getType());
2960  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2961  QualType CurrentType = E->getTypeSourceInfo()->getType();
2962  for (unsigned i = 0; i != n; ++i) {
2963    OffsetOfNode ON = E->getComponent(i);
2964    llvm::Value *Offset = nullptr;
2965    switch (ON.getKind()) {
2966    case OffsetOfNode::Array: {
2967      // Compute the index
2968      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2969      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2970      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2971      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2972
2973      // Save the element type
2974      CurrentType =
2975          CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2976
2977      // Compute the element size
2978      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2979          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2980
2981      // Multiply out to compute the result
2982      Offset = Builder.CreateMul(Idx, ElemSize);
2983      break;
2984    }
2985
2986    case OffsetOfNode::Field: {
2987      FieldDecl *MemberDecl = ON.getField();
2988      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
2989      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2990
2991      // Compute the index of the field in its parent.
2992      unsigned i = 0;
2993      // FIXME: It would be nice if we didn't have to loop here!
2994      for (RecordDecl::field_iterator Field = RD->field_begin(),
2995                                      FieldEnd = RD->field_end();
2996           Field != FieldEnd; ++Field, ++i) {
2997        if (*Field == MemberDecl)
2998          break;
2999      }
3000      assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3001
3002      // Compute the offset to the field
3003      int64_t OffsetInt = RL.getFieldOffset(i) /
3004                          CGF.getContext().getCharWidth();
3005      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3006
3007      // Save the element type.
3008      CurrentType = MemberDecl->getType();
3009      break;
3010    }
3011
3012    case OffsetOfNode::Identifier:
3013      llvm_unreachable("dependent __builtin_offsetof");
3014
3015    case OffsetOfNode::Base: {
3016      if (ON.getBase()->isVirtual()) {
3017        CGF.ErrorUnsupported(E, "virtual base in offsetof");
3018        continue;
3019      }
3020
3021      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3022      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3023
3024      // Save the element type.
3025      CurrentType = ON.getBase()->getType();
3026
3027      // Compute the offset to the base.
3028      auto *BaseRT = CurrentType->castAs<RecordType>();
3029      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
3030      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3031      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3032      break;
3033    }
3034    }
3035    Result = Builder.CreateAdd(Result, Offset);
3036  }
3037  return Result;
3038}
3039
3040/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3041/// argument of the sizeof expression as an integer.
3042Value *
3043ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3044                              const UnaryExprOrTypeTraitExpr *E) {
3045  QualType TypeToSize = E->getTypeOfArgument();
3046  if (auto Kind = E->getKind();
3047      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3048    if (const VariableArrayType *VAT =
3049            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3050      if (E->isArgumentType()) {
3051        // sizeof(type) - make sure to emit the VLA size.
3052        CGF.EmitVariablyModifiedType(TypeToSize);
3053      } else {
3054        // C99 6.5.3.4p2: If the argument is an expression of type
3055        // VLA, it is evaluated.
3056        CGF.EmitIgnoredExpr(E->getArgumentExpr());
3057      }
3058
3059      auto VlaSize = CGF.getVLASize(VAT);
3060      llvm::Value *size = VlaSize.NumElts;
3061
3062      // Scale the number of non-VLA elements by the non-VLA element size.
3063      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3064      if (!eltSize.isOne())
3065        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
3066
3067      return size;
3068    }
3069  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3070    auto Alignment =
3071        CGF.getContext()
3072            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
3073                E->getTypeOfArgument()->getPointeeType()))
3074            .getQuantity();
3075    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3076  } else if (E->getKind() == UETT_VectorElements) {
3077    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3078    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3079  }
3080
3081  // If this isn't sizeof(vla), the result must be constant; use the constant
3082  // folding logic so we don't have to duplicate it here.
3083  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3084}
3085
3086Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3087                                         QualType PromotionType) {
3088  QualType promotionTy = PromotionType.isNull()
3089                             ? getPromotionType(E->getSubExpr()->getType())
3090                             : PromotionType;
3091  Value *result = VisitReal(E, promotionTy);
3092  if (result && !promotionTy.isNull())
3093    result = EmitUnPromotedValue(result, E->getType());
3094  return result;
3095}
3096
3097Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3098                                    QualType PromotionType) {
3099  Expr *Op = E->getSubExpr();
3100  if (Op->getType()->isAnyComplexType()) {
3101    // If it's an l-value, load through the appropriate subobject l-value.
3102    // Note that we have to ask E because Op might be an l-value that
3103    // this won't work for, e.g. an Obj-C property.
3104    if (E->isGLValue())  {
3105      if (!PromotionType.isNull()) {
3106        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3107            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3108        if (result.first)
3109          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3110        return result.first;
3111      } else {
3112        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3113            .getScalarVal();
3114      }
3115    }
3116    // Otherwise, calculate and project.
3117    return CGF.EmitComplexExpr(Op, false, true).first;
3118  }
3119
3120  if (!PromotionType.isNull())
3121    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3122  return Visit(Op);
3123}
3124
3125Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3126                                         QualType PromotionType) {
3127  QualType promotionTy = PromotionType.isNull()
3128                             ? getPromotionType(E->getSubExpr()->getType())
3129                             : PromotionType;
3130  Value *result = VisitImag(E, promotionTy);
3131  if (result && !promotionTy.isNull())
3132    result = EmitUnPromotedValue(result, E->getType());
3133  return result;
3134}
3135
3136Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3137                                    QualType PromotionType) {
3138  Expr *Op = E->getSubExpr();
3139  if (Op->getType()->isAnyComplexType()) {
3140    // If it's an l-value, load through the appropriate subobject l-value.
3141    // Note that we have to ask E because Op might be an l-value that
3142    // this won't work for, e.g. an Obj-C property.
3143    if (Op->isGLValue()) {
3144      if (!PromotionType.isNull()) {
3145        CodeGenFunction::ComplexPairTy result = CGF.EmitComplexExpr(
3146            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3147        if (result.second)
3148          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3149        return result.second;
3150      } else {
3151        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3152            .getScalarVal();
3153      }
3154    }
3155    // Otherwise, calculate and project.
3156    return CGF.EmitComplexExpr(Op, true, false).second;
3157  }
3158
3159  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
3160  // effects are evaluated, but not the actual value.
3161  if (Op->isGLValue())
3162    CGF.EmitLValue(Op);
3163  else if (!PromotionType.isNull())
3164    CGF.EmitPromotedScalarExpr(Op, PromotionType);
3165  else
3166    CGF.EmitScalarExpr(Op, true);
3167  if (!PromotionType.isNull())
3168    return llvm::Constant::getNullValue(ConvertType(PromotionType));
3169  return llvm::Constant::getNullValue(ConvertType(E->getType()));
3170}
3171
3172//===----------------------------------------------------------------------===//
3173//                           Binary Operators
3174//===----------------------------------------------------------------------===//
3175
3176Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3177                                            QualType PromotionType) {
3178  return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3179}
3180
3181Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3182                                              QualType ExprType) {
3183  return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3184}
3185
3186Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3187  E = E->IgnoreParens();
3188  if (auto BO = dyn_cast<BinaryOperator>(E)) {
3189    switch (BO->getOpcode()) {
3190#define HANDLE_BINOP(OP)                                                       \
3191  case BO_##OP:                                                                \
3192    return Emit##OP(EmitBinOps(BO, PromotionType));
3193      HANDLE_BINOP(Add)
3194      HANDLE_BINOP(Sub)
3195      HANDLE_BINOP(Mul)
3196      HANDLE_BINOP(Div)
3197#undef HANDLE_BINOP
3198    default:
3199      break;
3200    }
3201  } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3202    switch (UO->getOpcode()) {
3203    case UO_Imag:
3204      return VisitImag(UO, PromotionType);
3205    case UO_Real:
3206      return VisitReal(UO, PromotionType);
3207    case UO_Minus:
3208      return VisitMinus(UO, PromotionType);
3209    case UO_Plus:
3210      return VisitPlus(UO, PromotionType);
3211    default:
3212      break;
3213    }
3214  }
3215  auto result = Visit(const_cast<Expr *>(E));
3216  if (result) {
3217    if (!PromotionType.isNull())
3218      return EmitPromotedValue(result, PromotionType);
3219    else
3220      return EmitUnPromotedValue(result, E->getType());
3221  }
3222  return result;
3223}
3224
3225BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3226                                        QualType PromotionType) {
3227  TestAndClearIgnoreResultAssign();
3228  BinOpInfo Result;
3229  Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3230  Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3231  if (!PromotionType.isNull())
3232    Result.Ty = PromotionType;
3233  else
3234    Result.Ty  = E->getType();
3235  Result.Opcode = E->getOpcode();
3236  Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3237  Result.E = E;
3238  return Result;
3239}
3240
3241LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3242                                              const CompoundAssignOperator *E,
3243                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3244                                                   Value *&Result) {
3245  QualType LHSTy = E->getLHS()->getType();
3246  BinOpInfo OpInfo;
3247
3248  if (E->getComputationResultType()->isAnyComplexType())
3249    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
3250
3251  // Emit the RHS first.  __block variables need to have the rhs evaluated
3252  // first, plus this should improve codegen a little.
3253
3254  QualType PromotionTypeCR;
3255  PromotionTypeCR = getPromotionType(E->getComputationResultType());
3256  if (PromotionTypeCR.isNull())
3257      PromotionTypeCR = E->getComputationResultType();
3258  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3259  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3260  if (!PromotionTypeRHS.isNull())
3261    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3262  else
3263    OpInfo.RHS = Visit(E->getRHS());
3264  OpInfo.Ty = PromotionTypeCR;
3265  OpInfo.Opcode = E->getOpcode();
3266  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3267  OpInfo.E = E;
3268  // Load/convert the LHS.
3269  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3270
3271  llvm::PHINode *atomicPHI = nullptr;
3272  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3273    QualType type = atomicTy->getValueType();
3274    if (!type->isBooleanType() && type->isIntegerType() &&
3275        !(type->isUnsignedIntegerType() &&
3276          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3277        CGF.getLangOpts().getSignedOverflowBehavior() !=
3278            LangOptions::SOB_Trapping) {
3279      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3280      llvm::Instruction::BinaryOps Op;
3281      switch (OpInfo.Opcode) {
3282        // We don't have atomicrmw operands for *, %, /, <<, >>
3283        case BO_MulAssign: case BO_DivAssign:
3284        case BO_RemAssign:
3285        case BO_ShlAssign:
3286        case BO_ShrAssign:
3287          break;
3288        case BO_AddAssign:
3289          AtomicOp = llvm::AtomicRMWInst::Add;
3290          Op = llvm::Instruction::Add;
3291          break;
3292        case BO_SubAssign:
3293          AtomicOp = llvm::AtomicRMWInst::Sub;
3294          Op = llvm::Instruction::Sub;
3295          break;
3296        case BO_AndAssign:
3297          AtomicOp = llvm::AtomicRMWInst::And;
3298          Op = llvm::Instruction::And;
3299          break;
3300        case BO_XorAssign:
3301          AtomicOp = llvm::AtomicRMWInst::Xor;
3302          Op = llvm::Instruction::Xor;
3303          break;
3304        case BO_OrAssign:
3305          AtomicOp = llvm::AtomicRMWInst::Or;
3306          Op = llvm::Instruction::Or;
3307          break;
3308        default:
3309          llvm_unreachable("Invalid compound assignment type");
3310      }
3311      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3312        llvm::Value *Amt = CGF.EmitToMemory(
3313            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3314                                 E->getExprLoc()),
3315            LHSTy);
3316        Value *OldVal = Builder.CreateAtomicRMW(
3317            AtomicOp, LHSLV.getAddress(CGF), Amt,
3318            llvm::AtomicOrdering::SequentiallyConsistent);
3319
3320        // Since operation is atomic, the result type is guaranteed to be the
3321        // same as the input in LLVM terms.
3322        Result = Builder.CreateBinOp(Op, OldVal, Amt);
3323        return LHSLV;
3324      }
3325    }
3326    // FIXME: For floating point types, we should be saving and restoring the
3327    // floating point environment in the loop.
3328    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3329    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3330    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3331    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3332    Builder.CreateBr(opBB);
3333    Builder.SetInsertPoint(opBB);
3334    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3335    atomicPHI->addIncoming(OpInfo.LHS, startBB);
3336    OpInfo.LHS = atomicPHI;
3337  }
3338  else
3339    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3340
3341  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3342  SourceLocation Loc = E->getExprLoc();
3343  if (!PromotionTypeLHS.isNull())
3344    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
3345                                      E->getExprLoc());
3346  else
3347    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
3348                                      E->getComputationLHSType(), Loc);
3349
3350  // Expand the binary operator.
3351  Result = (this->*Func)(OpInfo);
3352
3353  // Convert the result back to the LHS type,
3354  // potentially with Implicit Conversion sanitizer check.
3355  Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
3356                                ScalarConversionOpts(CGF.SanOpts));
3357
3358  if (atomicPHI) {
3359    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3360    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3361    auto Pair = CGF.EmitAtomicCompareExchange(
3362        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3363    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3364    llvm::Value *success = Pair.second;
3365    atomicPHI->addIncoming(old, curBlock);
3366    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3367    Builder.SetInsertPoint(contBB);
3368    return LHSLV;
3369  }
3370
3371  // Store the result value into the LHS lvalue. Bit-fields are handled
3372  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3373  // 'An assignment expression has the value of the left operand after the
3374  // assignment...'.
3375  if (LHSLV.isBitField())
3376    CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
3377  else
3378    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
3379
3380  if (CGF.getLangOpts().OpenMP)
3381    CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF,
3382                                                                  E->getLHS());
3383  return LHSLV;
3384}
3385
3386Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3387                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3388  bool Ignore = TestAndClearIgnoreResultAssign();
3389  Value *RHS = nullptr;
3390  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3391
3392  // If the result is clearly ignored, return now.
3393  if (Ignore)
3394    return nullptr;
3395
3396  // The result of an assignment in C is the assigned r-value.
3397  if (!CGF.getLangOpts().CPlusPlus)
3398    return RHS;
3399
3400  // If the lvalue is non-volatile, return the computed value of the assignment.
3401  if (!LHS.isVolatileQualified())
3402    return RHS;
3403
3404  // Otherwise, reload the value.
3405  return EmitLoadOfLValue(LHS, E->getExprLoc());
3406}
3407
3408void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3409    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3410  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
3411
3412  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3413    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3414                                    SanitizerKind::IntegerDivideByZero));
3415  }
3416
3417  const auto *BO = cast<BinaryOperator>(Ops.E);
3418  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3419      Ops.Ty->hasSignedIntegerRepresentation() &&
3420      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3421      Ops.mayHaveIntegerOverflow()) {
3422    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3423
3424    llvm::Value *IntMin =
3425      Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3426    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3427
3428    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3429    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3430    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3431    Checks.push_back(
3432        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3433  }
3434
3435  if (Checks.size() > 0)
3436    EmitBinOpCheck(Checks, Ops);
3437}
3438
3439Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3440  {
3441    CodeGenFunction::SanitizerScope SanScope(&CGF);
3442    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3443         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3444        Ops.Ty->isIntegerType() &&
3445        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3446      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3447      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3448    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3449               Ops.Ty->isRealFloatingType() &&
3450               Ops.mayHaveFloatDivisionByZero()) {
3451      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3452      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3453      EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3454                     Ops);
3455    }
3456  }
3457
3458  if (Ops.Ty->isConstantMatrixType()) {
3459    llvm::MatrixBuilder MB(Builder);
3460    // We need to check the types of the operands of the operator to get the
3461    // correct matrix dimensions.
3462    auto *BO = cast<BinaryOperator>(Ops.E);
3463    (void)BO;
3464    assert(
3465        isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3466        "first operand must be a matrix");
3467    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3468           "second operand must be an arithmetic type");
3469    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3470    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3471                              Ops.Ty->hasUnsignedIntegerRepresentation());
3472  }
3473
3474  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3475    llvm::Value *Val;
3476    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3477    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3478    CGF.SetDivFPAccuracy(Val);
3479    return Val;
3480  }
3481  else if (Ops.isFixedPointOp())
3482    return EmitFixedPointBinOp(Ops);
3483  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3484    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3485  else
3486    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3487}
3488
3489Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3490  // Rem in C can't be a floating point type: C99 6.5.5p2.
3491  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3492       CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3493      Ops.Ty->isIntegerType() &&
3494      (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3495    CodeGenFunction::SanitizerScope SanScope(&CGF);
3496    llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3497    EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3498  }
3499
3500  if (Ops.Ty->hasUnsignedIntegerRepresentation())
3501    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3502  else
3503    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3504}
3505
3506Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3507  unsigned IID;
3508  unsigned OpID = 0;
3509  SanitizerHandler OverflowKind;
3510
3511  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3512  switch (Ops.Opcode) {
3513  case BO_Add:
3514  case BO_AddAssign:
3515    OpID = 1;
3516    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3517                     llvm::Intrinsic::uadd_with_overflow;
3518    OverflowKind = SanitizerHandler::AddOverflow;
3519    break;
3520  case BO_Sub:
3521  case BO_SubAssign:
3522    OpID = 2;
3523    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3524                     llvm::Intrinsic::usub_with_overflow;
3525    OverflowKind = SanitizerHandler::SubOverflow;
3526    break;
3527  case BO_Mul:
3528  case BO_MulAssign:
3529    OpID = 3;
3530    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3531                     llvm::Intrinsic::umul_with_overflow;
3532    OverflowKind = SanitizerHandler::MulOverflow;
3533    break;
3534  default:
3535    llvm_unreachable("Unsupported operation for overflow detection");
3536  }
3537  OpID <<= 1;
3538  if (isSigned)
3539    OpID |= 1;
3540
3541  CodeGenFunction::SanitizerScope SanScope(&CGF);
3542  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3543
3544  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3545
3546  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3547  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3548  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3549
3550  // Handle overflow with llvm.trap if no custom handler has been specified.
3551  const std::string *handlerName =
3552    &CGF.getLangOpts().OverflowHandler;
3553  if (handlerName->empty()) {
3554    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3555    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3556    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3557      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3558      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3559                              : SanitizerKind::UnsignedIntegerOverflow;
3560      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3561    } else
3562      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3563    return result;
3564  }
3565
3566  // Branch in case of overflow.
3567  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3568  llvm::BasicBlock *continueBB =
3569      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3570  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3571
3572  Builder.CreateCondBr(overflow, overflowBB, continueBB);
3573
3574  // If an overflow handler is set, then we want to call it and then use its
3575  // result, if it returns.
3576  Builder.SetInsertPoint(overflowBB);
3577
3578  // Get the overflow handler.
3579  llvm::Type *Int8Ty = CGF.Int8Ty;
3580  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3581  llvm::FunctionType *handlerTy =
3582      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3583  llvm::FunctionCallee handler =
3584      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3585
3586  // Sign extend the args to 64-bit, so that we can use the same handler for
3587  // all types of overflow.
3588  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3589  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3590
3591  // Call the handler with the two arguments, the operation, and the size of
3592  // the result.
3593  llvm::Value *handlerArgs[] = {
3594    lhs,
3595    rhs,
3596    Builder.getInt8(OpID),
3597    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3598  };
3599  llvm::Value *handlerResult =
3600    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3601
3602  // Truncate the result back to the desired size.
3603  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3604  Builder.CreateBr(continueBB);
3605
3606  Builder.SetInsertPoint(continueBB);
3607  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3608  phi->addIncoming(result, initialBB);
3609  phi->addIncoming(handlerResult, overflowBB);
3610
3611  return phi;
3612}
3613
3614/// Emit pointer + index arithmetic.
3615static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3616                                    const BinOpInfo &op,
3617                                    bool isSubtraction) {
3618  // Must have binary (not unary) expr here.  Unary pointer
3619  // increment/decrement doesn't use this path.
3620  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3621
3622  Value *pointer = op.LHS;
3623  Expr *pointerOperand = expr->getLHS();
3624  Value *index = op.RHS;
3625  Expr *indexOperand = expr->getRHS();
3626
3627  // In a subtraction, the LHS is always the pointer.
3628  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3629    std::swap(pointer, index);
3630    std::swap(pointerOperand, indexOperand);
3631  }
3632
3633  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3634
3635  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3636  auto &DL = CGF.CGM.getDataLayout();
3637  auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3638
3639  // Some versions of glibc and gcc use idioms (particularly in their malloc
3640  // routines) that add a pointer-sized integer (known to be a pointer value)
3641  // to a null pointer in order to cast the value back to an integer or as
3642  // part of a pointer alignment algorithm.  This is undefined behavior, but
3643  // we'd like to be able to compile programs that use it.
3644  //
3645  // Normally, we'd generate a GEP with a null-pointer base here in response
3646  // to that code, but it's also UB to dereference a pointer created that
3647  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
3648  // generate a direct cast of the integer value to a pointer.
3649  //
3650  // The idiom (p = nullptr + N) is not met if any of the following are true:
3651  //
3652  //   The operation is subtraction.
3653  //   The index is not pointer-sized.
3654  //   The pointer type is not byte-sized.
3655  //
3656  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3657                                                       op.Opcode,
3658                                                       expr->getLHS(),
3659                                                       expr->getRHS()))
3660    return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3661
3662  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
3663    // Zero-extend or sign-extend the pointer value according to
3664    // whether the index is signed or not.
3665    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3666                                      "idx.ext");
3667  }
3668
3669  // If this is subtraction, negate the index.
3670  if (isSubtraction)
3671    index = CGF.Builder.CreateNeg(index, "idx.neg");
3672
3673  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3674    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3675                        /*Accessed*/ false);
3676
3677  const PointerType *pointerType
3678    = pointerOperand->getType()->getAs<PointerType>();
3679  if (!pointerType) {
3680    QualType objectType = pointerOperand->getType()
3681                                        ->castAs<ObjCObjectPointerType>()
3682                                        ->getPointeeType();
3683    llvm::Value *objectSize
3684      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3685
3686    index = CGF.Builder.CreateMul(index, objectSize);
3687
3688    Value *result =
3689        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
3690    return CGF.Builder.CreateBitCast(result, pointer->getType());
3691  }
3692
3693  QualType elementType = pointerType->getPointeeType();
3694  if (const VariableArrayType *vla
3695        = CGF.getContext().getAsVariableArrayType(elementType)) {
3696    // The element count here is the total number of non-VLA elements.
3697    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3698
3699    // Effectively, the multiply by the VLA size is part of the GEP.
3700    // GEP indexes are signed, and scaling an index isn't permitted to
3701    // signed-overflow, so we use the same semantics for our explicit
3702    // multiply.  We suppress this if overflow is not undefined behavior.
3703    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3704    if (CGF.getLangOpts().isSignedOverflowDefined()) {
3705      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3706      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
3707    } else {
3708      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3709      pointer = CGF.EmitCheckedInBoundsGEP(
3710          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
3711          "add.ptr");
3712    }
3713    return pointer;
3714  }
3715
3716  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3717  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3718  // future proof.
3719  llvm::Type *elemTy;
3720  if (elementType->isVoidType() || elementType->isFunctionType())
3721    elemTy = CGF.Int8Ty;
3722  else
3723    elemTy = CGF.ConvertTypeForMem(elementType);
3724
3725  if (CGF.getLangOpts().isSignedOverflowDefined())
3726    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
3727
3728  return CGF.EmitCheckedInBoundsGEP(
3729      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
3730      "add.ptr");
3731}
3732
3733// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3734// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3735// the add operand respectively. This allows fmuladd to represent a*b-c, or
3736// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3737// efficient operations.
3738static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3739                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
3740                           bool negMul, bool negAdd) {
3741  Value *MulOp0 = MulOp->getOperand(0);
3742  Value *MulOp1 = MulOp->getOperand(1);
3743  if (negMul)
3744    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3745  if (negAdd)
3746    Addend = Builder.CreateFNeg(Addend, "neg");
3747
3748  Value *FMulAdd = nullptr;
3749  if (Builder.getIsFPConstrained()) {
3750    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3751           "Only constrained operation should be created when Builder is in FP "
3752           "constrained mode");
3753    FMulAdd = Builder.CreateConstrainedFPCall(
3754        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3755                             Addend->getType()),
3756        {MulOp0, MulOp1, Addend});
3757  } else {
3758    FMulAdd = Builder.CreateCall(
3759        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3760        {MulOp0, MulOp1, Addend});
3761  }
3762  MulOp->eraseFromParent();
3763
3764  return FMulAdd;
3765}
3766
3767// Check whether it would be legal to emit an fmuladd intrinsic call to
3768// represent op and if so, build the fmuladd.
3769//
3770// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3771// Does NOT check the type of the operation - it's assumed that this function
3772// will be called from contexts where it's known that the type is contractable.
3773static Value* tryEmitFMulAdd(const BinOpInfo &op,
3774                         const CodeGenFunction &CGF, CGBuilderTy &Builder,
3775                         bool isSub=false) {
3776
3777  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3778          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3779         "Only fadd/fsub can be the root of an fmuladd.");
3780
3781  // Check whether this op is marked as fusable.
3782  if (!op.FPFeatures.allowFPContractWithinStatement())
3783    return nullptr;
3784
3785  Value *LHS = op.LHS;
3786  Value *RHS = op.RHS;
3787
3788  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
3789  // it is the only use of its operand.
3790  bool NegLHS = false;
3791  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
3792    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
3793        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
3794      LHS = LHSUnOp->getOperand(0);
3795      NegLHS = true;
3796    }
3797  }
3798
3799  bool NegRHS = false;
3800  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
3801    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
3802        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
3803      RHS = RHSUnOp->getOperand(0);
3804      NegRHS = true;
3805    }
3806  }
3807
3808  // We have a potentially fusable op. Look for a mul on one of the operands.
3809  // Also, make sure that the mul result isn't used directly. In that case,
3810  // there's no point creating a muladd operation.
3811  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
3812    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3813        (LHSBinOp->use_empty() || NegLHS)) {
3814      // If we looked through fneg, erase it.
3815      if (NegLHS)
3816        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
3817      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
3818    }
3819  }
3820  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
3821    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3822        (RHSBinOp->use_empty() || NegRHS)) {
3823      // If we looked through fneg, erase it.
3824      if (NegRHS)
3825        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
3826      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
3827    }
3828  }
3829
3830  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
3831    if (LHSBinOp->getIntrinsicID() ==
3832            llvm::Intrinsic::experimental_constrained_fmul &&
3833        (LHSBinOp->use_empty() || NegLHS)) {
3834      // If we looked through fneg, erase it.
3835      if (NegLHS)
3836        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
3837      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
3838    }
3839  }
3840  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
3841    if (RHSBinOp->getIntrinsicID() ==
3842            llvm::Intrinsic::experimental_constrained_fmul &&
3843        (RHSBinOp->use_empty() || NegRHS)) {
3844      // If we looked through fneg, erase it.
3845      if (NegRHS)
3846        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
3847      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
3848    }
3849  }
3850
3851  return nullptr;
3852}
3853
3854Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3855  if (op.LHS->getType()->isPointerTy() ||
3856      op.RHS->getType()->isPointerTy())
3857    return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3858
3859  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3860    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3861    case LangOptions::SOB_Defined:
3862      return Builder.CreateAdd(op.LHS, op.RHS, "add");
3863    case LangOptions::SOB_Undefined:
3864      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3865        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3866      [[fallthrough]];
3867    case LangOptions::SOB_Trapping:
3868      if (CanElideOverflowCheck(CGF.getContext(), op))
3869        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3870      return EmitOverflowCheckedBinOp(op);
3871    }
3872  }
3873
3874  // For vector and matrix adds, try to fold into a fmuladd.
3875  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3876    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3877    // Try to form an fmuladd.
3878    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3879      return FMulAdd;
3880  }
3881
3882  if (op.Ty->isConstantMatrixType()) {
3883    llvm::MatrixBuilder MB(Builder);
3884    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3885    return MB.CreateAdd(op.LHS, op.RHS);
3886  }
3887
3888  if (op.Ty->isUnsignedIntegerType() &&
3889      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3890      !CanElideOverflowCheck(CGF.getContext(), op))
3891    return EmitOverflowCheckedBinOp(op);
3892
3893  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3894    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
3895    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
3896  }
3897
3898  if (op.isFixedPointOp())
3899    return EmitFixedPointBinOp(op);
3900
3901  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3902}
3903
3904/// The resulting value must be calculated with exact precision, so the operands
3905/// may not be the same type.
3906Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3907  using llvm::APSInt;
3908  using llvm::ConstantInt;
3909
3910  // This is either a binary operation where at least one of the operands is
3911  // a fixed-point type, or a unary operation where the operand is a fixed-point
3912  // type. The result type of a binary operation is determined by
3913  // Sema::handleFixedPointConversions().
3914  QualType ResultTy = op.Ty;
3915  QualType LHSTy, RHSTy;
3916  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
3917    RHSTy = BinOp->getRHS()->getType();
3918    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
3919      // For compound assignment, the effective type of the LHS at this point
3920      // is the computation LHS type, not the actual LHS type, and the final
3921      // result type is not the type of the expression but rather the
3922      // computation result type.
3923      LHSTy = CAO->getComputationLHSType();
3924      ResultTy = CAO->getComputationResultType();
3925    } else
3926      LHSTy = BinOp->getLHS()->getType();
3927  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
3928    LHSTy = UnOp->getSubExpr()->getType();
3929    RHSTy = UnOp->getSubExpr()->getType();
3930  }
3931  ASTContext &Ctx = CGF.getContext();
3932  Value *LHS = op.LHS;
3933  Value *RHS = op.RHS;
3934
3935  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3936  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3937  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3938  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3939
3940  // Perform the actual operation.
3941  Value *Result;
3942  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3943  switch (op.Opcode) {
3944  case BO_AddAssign:
3945  case BO_Add:
3946    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
3947    break;
3948  case BO_SubAssign:
3949  case BO_Sub:
3950    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
3951    break;
3952  case BO_MulAssign:
3953  case BO_Mul:
3954    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
3955    break;
3956  case BO_DivAssign:
3957  case BO_Div:
3958    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
3959    break;
3960  case BO_ShlAssign:
3961  case BO_Shl:
3962    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
3963    break;
3964  case BO_ShrAssign:
3965  case BO_Shr:
3966    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
3967    break;
3968  case BO_LT:
3969    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3970  case BO_GT:
3971    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
3972  case BO_LE:
3973    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3974  case BO_GE:
3975    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3976  case BO_EQ:
3977    // For equality operations, we assume any padding bits on unsigned types are
3978    // zero'd out. They could be overwritten through non-saturating operations
3979    // that cause overflow, but this leads to undefined behavior.
3980    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
3981  case BO_NE:
3982    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
3983  case BO_Cmp:
3984  case BO_LAnd:
3985  case BO_LOr:
3986    llvm_unreachable("Found unimplemented fixed point binary operation");
3987  case BO_PtrMemD:
3988  case BO_PtrMemI:
3989  case BO_Rem:
3990  case BO_Xor:
3991  case BO_And:
3992  case BO_Or:
3993  case BO_Assign:
3994  case BO_RemAssign:
3995  case BO_AndAssign:
3996  case BO_XorAssign:
3997  case BO_OrAssign:
3998  case BO_Comma:
3999    llvm_unreachable("Found unsupported binary operation for fixed point types.");
4000  }
4001
4002  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4003                 BinaryOperator::isShiftAssignOp(op.Opcode);
4004  // Convert to the result type.
4005  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4006                                                      : CommonFixedSema,
4007                                      ResultFixedSema);
4008}
4009
4010Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4011  // The LHS is always a pointer if either side is.
4012  if (!op.LHS->getType()->isPointerTy()) {
4013    if (op.Ty->isSignedIntegerOrEnumerationType()) {
4014      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4015      case LangOptions::SOB_Defined:
4016        return Builder.CreateSub(op.LHS, op.RHS, "sub");
4017      case LangOptions::SOB_Undefined:
4018        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4019          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4020        [[fallthrough]];
4021      case LangOptions::SOB_Trapping:
4022        if (CanElideOverflowCheck(CGF.getContext(), op))
4023          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4024        return EmitOverflowCheckedBinOp(op);
4025      }
4026    }
4027
4028    // For vector and matrix subs, try to fold into a fmuladd.
4029    if (op.LHS->getType()->isFPOrFPVectorTy()) {
4030      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4031      // Try to form an fmuladd.
4032      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4033        return FMulAdd;
4034    }
4035
4036    if (op.Ty->isConstantMatrixType()) {
4037      llvm::MatrixBuilder MB(Builder);
4038      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4039      return MB.CreateSub(op.LHS, op.RHS);
4040    }
4041
4042    if (op.Ty->isUnsignedIntegerType() &&
4043        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4044        !CanElideOverflowCheck(CGF.getContext(), op))
4045      return EmitOverflowCheckedBinOp(op);
4046
4047    if (op.LHS->getType()->isFPOrFPVectorTy()) {
4048      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4049      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4050    }
4051
4052    if (op.isFixedPointOp())
4053      return EmitFixedPointBinOp(op);
4054
4055    return Builder.CreateSub(op.LHS, op.RHS, "sub");
4056  }
4057
4058  // If the RHS is not a pointer, then we have normal pointer
4059  // arithmetic.
4060  if (!op.RHS->getType()->isPointerTy())
4061    return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
4062
4063  // Otherwise, this is a pointer subtraction.
4064
4065  // Do the raw subtraction part.
4066  llvm::Value *LHS
4067    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4068  llvm::Value *RHS
4069    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4070  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4071
4072  // Okay, figure out the element size.
4073  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4074  QualType elementType = expr->getLHS()->getType()->getPointeeType();
4075
4076  llvm::Value *divisor = nullptr;
4077
4078  // For a variable-length array, this is going to be non-constant.
4079  if (const VariableArrayType *vla
4080        = CGF.getContext().getAsVariableArrayType(elementType)) {
4081    auto VlaSize = CGF.getVLASize(vla);
4082    elementType = VlaSize.Type;
4083    divisor = VlaSize.NumElts;
4084
4085    // Scale the number of non-VLA elements by the non-VLA element size.
4086    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4087    if (!eltSize.isOne())
4088      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4089
4090  // For everything elese, we can just compute it, safe in the
4091  // assumption that Sema won't let anything through that we can't
4092  // safely compute the size of.
4093  } else {
4094    CharUnits elementSize;
4095    // Handle GCC extension for pointer arithmetic on void* and
4096    // function pointer types.
4097    if (elementType->isVoidType() || elementType->isFunctionType())
4098      elementSize = CharUnits::One();
4099    else
4100      elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4101
4102    // Don't even emit the divide for element size of 1.
4103    if (elementSize.isOne())
4104      return diffInChars;
4105
4106    divisor = CGF.CGM.getSize(elementSize);
4107  }
4108
4109  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4110  // pointer difference in C is only defined in the case where both operands
4111  // are pointing to elements of an array.
4112  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4113}
4114
4115Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
4116  llvm::IntegerType *Ty;
4117  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4118    Ty = cast<llvm::IntegerType>(VT->getElementType());
4119  else
4120    Ty = cast<llvm::IntegerType>(LHS->getType());
4121  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
4122}
4123
4124Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4125                                              const Twine &Name) {
4126  llvm::IntegerType *Ty;
4127  if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4128    Ty = cast<llvm::IntegerType>(VT->getElementType());
4129  else
4130    Ty = cast<llvm::IntegerType>(LHS->getType());
4131
4132  if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4133        return Builder.CreateAnd(RHS, GetWidthMinusOneValue(LHS, RHS), Name);
4134
4135  return Builder.CreateURem(
4136      RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4137}
4138
4139Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4140  // TODO: This misses out on the sanitizer check below.
4141  if (Ops.isFixedPointOp())
4142    return EmitFixedPointBinOp(Ops);
4143
4144  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4145  // RHS to the same size as the LHS.
4146  Value *RHS = Ops.RHS;
4147  if (Ops.LHS->getType() != RHS->getType())
4148    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4149
4150  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4151                            Ops.Ty->hasSignedIntegerRepresentation() &&
4152                            !CGF.getLangOpts().isSignedOverflowDefined() &&
4153                            !CGF.getLangOpts().CPlusPlus20;
4154  bool SanitizeUnsignedBase =
4155      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4156      Ops.Ty->hasUnsignedIntegerRepresentation();
4157  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4158  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4159  // OpenCL 6.3j: shift values are effectively % word size of LHS.
4160  if (CGF.getLangOpts().OpenCL)
4161    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4162  else if ((SanitizeBase || SanitizeExponent) &&
4163           isa<llvm::IntegerType>(Ops.LHS->getType())) {
4164    CodeGenFunction::SanitizerScope SanScope(&CGF);
4165    SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
4166    llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
4167    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4168
4169    if (SanitizeExponent) {
4170      Checks.push_back(
4171          std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
4172    }
4173
4174    if (SanitizeBase) {
4175      // Check whether we are shifting any non-zero bits off the top of the
4176      // integer. We only emit this check if exponent is valid - otherwise
4177      // instructions below will have undefined behavior themselves.
4178      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4179      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4180      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4181      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4182      llvm::Value *PromotedWidthMinusOne =
4183          (RHS == Ops.RHS) ? WidthMinusOne
4184                           : GetWidthMinusOneValue(Ops.LHS, RHS);
4185      CGF.EmitBlock(CheckShiftBase);
4186      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4187          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4188                                     /*NUW*/ true, /*NSW*/ true),
4189          "shl.check");
4190      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4191        // In C99, we are not permitted to shift a 1 bit into the sign bit.
4192        // Under C++11's rules, shifting a 1 bit into the sign bit is
4193        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4194        // define signed left shifts, so we use the C99 and C++11 rules there).
4195        // Unsigned shifts can always shift into the top bit.
4196        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4197        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4198      }
4199      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4200      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4201      CGF.EmitBlock(Cont);
4202      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4203      BaseCheck->addIncoming(Builder.getTrue(), Orig);
4204      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4205      Checks.push_back(std::make_pair(
4206          BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
4207                                        : SanitizerKind::UnsignedShiftBase));
4208    }
4209
4210    assert(!Checks.empty());
4211    EmitBinOpCheck(Checks, Ops);
4212  }
4213
4214  return Builder.CreateShl(Ops.LHS, RHS, "shl");
4215}
4216
4217Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4218  // TODO: This misses out on the sanitizer check below.
4219  if (Ops.isFixedPointOp())
4220    return EmitFixedPointBinOp(Ops);
4221
4222  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4223  // RHS to the same size as the LHS.
4224  Value *RHS = Ops.RHS;
4225  if (Ops.LHS->getType() != RHS->getType())
4226    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4227
4228  // OpenCL 6.3j: shift values are effectively % word size of LHS.
4229  if (CGF.getLangOpts().OpenCL)
4230    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4231  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4232           isa<llvm::IntegerType>(Ops.LHS->getType())) {
4233    CodeGenFunction::SanitizerScope SanScope(&CGF);
4234    llvm::Value *Valid =
4235        Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
4236    EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
4237  }
4238
4239  if (Ops.Ty->hasUnsignedIntegerRepresentation())
4240    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4241  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4242}
4243
4244enum IntrinsicType { VCMPEQ, VCMPGT };
4245// return corresponding comparison intrinsic for given vector type
4246static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4247                                        BuiltinType::Kind ElemKind) {
4248  switch (ElemKind) {
4249  default: llvm_unreachable("unexpected element type");
4250  case BuiltinType::Char_U:
4251  case BuiltinType::UChar:
4252    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4253                            llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4254  case BuiltinType::Char_S:
4255  case BuiltinType::SChar:
4256    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4257                            llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4258  case BuiltinType::UShort:
4259    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4260                            llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4261  case BuiltinType::Short:
4262    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4263                            llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4264  case BuiltinType::UInt:
4265    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4266                            llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4267  case BuiltinType::Int:
4268    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4269                            llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4270  case BuiltinType::ULong:
4271  case BuiltinType::ULongLong:
4272    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4273                            llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4274  case BuiltinType::Long:
4275  case BuiltinType::LongLong:
4276    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4277                            llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4278  case BuiltinType::Float:
4279    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4280                            llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4281  case BuiltinType::Double:
4282    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4283                            llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4284  case BuiltinType::UInt128:
4285    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4286                          : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4287  case BuiltinType::Int128:
4288    return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4289                          : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4290  }
4291}
4292
4293Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4294                                      llvm::CmpInst::Predicate UICmpOpc,
4295                                      llvm::CmpInst::Predicate SICmpOpc,
4296                                      llvm::CmpInst::Predicate FCmpOpc,
4297                                      bool IsSignaling) {
4298  TestAndClearIgnoreResultAssign();
4299  Value *Result;
4300  QualType LHSTy = E->getLHS()->getType();
4301  QualType RHSTy = E->getRHS()->getType();
4302  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4303    assert(E->getOpcode() == BO_EQ ||
4304           E->getOpcode() == BO_NE);
4305    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4306    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4307    Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
4308                   CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4309  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4310    BinOpInfo BOInfo = EmitBinOps(E);
4311    Value *LHS = BOInfo.LHS;
4312    Value *RHS = BOInfo.RHS;
4313
4314    // If AltiVec, the comparison results in a numeric type, so we use
4315    // intrinsics comparing vectors and giving 0 or 1 as a result
4316    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4317      // constants for mapping CR6 register bits to predicate result
4318      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4319
4320      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4321
4322      // in several cases vector arguments order will be reversed
4323      Value *FirstVecArg = LHS,
4324            *SecondVecArg = RHS;
4325
4326      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4327      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4328
4329      switch(E->getOpcode()) {
4330      default: llvm_unreachable("is not a comparison operation");
4331      case BO_EQ:
4332        CR6 = CR6_LT;
4333        ID = GetIntrinsic(VCMPEQ, ElementKind);
4334        break;
4335      case BO_NE:
4336        CR6 = CR6_EQ;
4337        ID = GetIntrinsic(VCMPEQ, ElementKind);
4338        break;
4339      case BO_LT:
4340        CR6 = CR6_LT;
4341        ID = GetIntrinsic(VCMPGT, ElementKind);
4342        std::swap(FirstVecArg, SecondVecArg);
4343        break;
4344      case BO_GT:
4345        CR6 = CR6_LT;
4346        ID = GetIntrinsic(VCMPGT, ElementKind);
4347        break;
4348      case BO_LE:
4349        if (ElementKind == BuiltinType::Float) {
4350          CR6 = CR6_LT;
4351          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4352          std::swap(FirstVecArg, SecondVecArg);
4353        }
4354        else {
4355          CR6 = CR6_EQ;
4356          ID = GetIntrinsic(VCMPGT, ElementKind);
4357        }
4358        break;
4359      case BO_GE:
4360        if (ElementKind == BuiltinType::Float) {
4361          CR6 = CR6_LT;
4362          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4363        }
4364        else {
4365          CR6 = CR6_EQ;
4366          ID = GetIntrinsic(VCMPGT, ElementKind);
4367          std::swap(FirstVecArg, SecondVecArg);
4368        }
4369        break;
4370      }
4371
4372      Value *CR6Param = Builder.getInt32(CR6);
4373      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4374      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4375
4376      // The result type of intrinsic may not be same as E->getType().
4377      // If E->getType() is not BoolTy, EmitScalarConversion will do the
4378      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4379      // do nothing, if ResultTy is not i1 at the same time, it will cause
4380      // crash later.
4381      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4382      if (ResultTy->getBitWidth() > 1 &&
4383          E->getType() == CGF.getContext().BoolTy)
4384        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4385      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4386                                  E->getExprLoc());
4387    }
4388
4389    if (BOInfo.isFixedPointOp()) {
4390      Result = EmitFixedPointBinOp(BOInfo);
4391    } else if (LHS->getType()->isFPOrFPVectorTy()) {
4392      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4393      if (!IsSignaling)
4394        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4395      else
4396        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4397    } else if (LHSTy->hasSignedIntegerRepresentation()) {
4398      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4399    } else {
4400      // Unsigned integers and pointers.
4401
4402      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4403          !isa<llvm::ConstantPointerNull>(LHS) &&
4404          !isa<llvm::ConstantPointerNull>(RHS)) {
4405
4406        // Dynamic information is required to be stripped for comparisons,
4407        // because it could leak the dynamic information.  Based on comparisons
4408        // of pointers to dynamic objects, the optimizer can replace one pointer
4409        // with another, which might be incorrect in presence of invariant
4410        // groups. Comparison with null is safe because null does not carry any
4411        // dynamic information.
4412        if (LHSTy.mayBeDynamicClass())
4413          LHS = Builder.CreateStripInvariantGroup(LHS);
4414        if (RHSTy.mayBeDynamicClass())
4415          RHS = Builder.CreateStripInvariantGroup(RHS);
4416      }
4417
4418      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4419    }
4420
4421    // If this is a vector comparison, sign extend the result to the appropriate
4422    // vector integer type and return it (don't convert to bool).
4423    if (LHSTy->isVectorType())
4424      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4425
4426  } else {
4427    // Complex Comparison: can only be an equality comparison.
4428    CodeGenFunction::ComplexPairTy LHS, RHS;
4429    QualType CETy;
4430    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4431      LHS = CGF.EmitComplexExpr(E->getLHS());
4432      CETy = CTy->getElementType();
4433    } else {
4434      LHS.first = Visit(E->getLHS());
4435      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4436      CETy = LHSTy;
4437    }
4438    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4439      RHS = CGF.EmitComplexExpr(E->getRHS());
4440      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4441                                                     CTy->getElementType()) &&
4442             "The element types must always match.");
4443      (void)CTy;
4444    } else {
4445      RHS.first = Visit(E->getRHS());
4446      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4447      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4448             "The element types must always match.");
4449    }
4450
4451    Value *ResultR, *ResultI;
4452    if (CETy->isRealFloatingType()) {
4453      // As complex comparisons can only be equality comparisons, they
4454      // are never signaling comparisons.
4455      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4456      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4457    } else {
4458      // Complex comparisons can only be equality comparisons.  As such, signed
4459      // and unsigned opcodes are the same.
4460      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4461      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4462    }
4463
4464    if (E->getOpcode() == BO_EQ) {
4465      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4466    } else {
4467      assert(E->getOpcode() == BO_NE &&
4468             "Complex comparison other than == or != ?");
4469      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4470    }
4471  }
4472
4473  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4474                              E->getExprLoc());
4475}
4476
4477Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4478  bool Ignore = TestAndClearIgnoreResultAssign();
4479
4480  Value *RHS;
4481  LValue LHS;
4482
4483  switch (E->getLHS()->getType().getObjCLifetime()) {
4484  case Qualifiers::OCL_Strong:
4485    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4486    break;
4487
4488  case Qualifiers::OCL_Autoreleasing:
4489    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4490    break;
4491
4492  case Qualifiers::OCL_ExplicitNone:
4493    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4494    break;
4495
4496  case Qualifiers::OCL_Weak:
4497    RHS = Visit(E->getRHS());
4498    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4499    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4500    break;
4501
4502  case Qualifiers::OCL_None:
4503    // __block variables need to have the rhs evaluated first, plus
4504    // this should improve codegen just a little.
4505    RHS = Visit(E->getRHS());
4506    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4507
4508    // Store the value into the LHS.  Bit-fields are handled specially
4509    // because the result is altered by the store, i.e., [C99 6.5.16p1]
4510    // 'An assignment expression has the value of the left operand after
4511    // the assignment...'.
4512    if (LHS.isBitField()) {
4513      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4514    } else {
4515      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4516      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4517    }
4518  }
4519
4520  // If the result is clearly ignored, return now.
4521  if (Ignore)
4522    return nullptr;
4523
4524  // The result of an assignment in C is the assigned r-value.
4525  if (!CGF.getLangOpts().CPlusPlus)
4526    return RHS;
4527
4528  // If the lvalue is non-volatile, return the computed value of the assignment.
4529  if (!LHS.isVolatileQualified())
4530    return RHS;
4531
4532  // Otherwise, reload the value.
4533  return EmitLoadOfLValue(LHS, E->getExprLoc());
4534}
4535
4536Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4537  // Perform vector logical and on comparisons with zero vectors.
4538  if (E->getType()->isVectorType()) {
4539    CGF.incrementProfileCounter(E);
4540
4541    Value *LHS = Visit(E->getLHS());
4542    Value *RHS = Visit(E->getRHS());
4543    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4544    if (LHS->getType()->isFPOrFPVectorTy()) {
4545      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4546          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4547      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4548      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4549    } else {
4550      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4551      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4552    }
4553    Value *And = Builder.CreateAnd(LHS, RHS);
4554    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4555  }
4556
4557  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4558  llvm::Type *ResTy = ConvertType(E->getType());
4559
4560  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4561  // If we have 1 && X, just emit X without inserting the control flow.
4562  bool LHSCondVal;
4563  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4564    if (LHSCondVal) { // If we have 1 && X, just emit X.
4565      CGF.incrementProfileCounter(E);
4566
4567      // If the top of the logical operator nest, reset the MCDC temp to 0.
4568      if (CGF.MCDCLogOpStack.empty())
4569        CGF.maybeResetMCDCCondBitmap(E);
4570
4571      CGF.MCDCLogOpStack.push_back(E);
4572
4573      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4574
4575      // If we're generating for profiling or coverage, generate a branch to a
4576      // block that increments the RHS counter needed to track branch condition
4577      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4578      // "FalseBlock" after the increment is done.
4579      if (InstrumentRegions &&
4580          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4581        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4582        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4583        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4584        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4585        CGF.EmitBlock(RHSBlockCnt);
4586        CGF.incrementProfileCounter(E->getRHS());
4587        CGF.EmitBranch(FBlock);
4588        CGF.EmitBlock(FBlock);
4589      }
4590
4591      CGF.MCDCLogOpStack.pop_back();
4592      // If the top of the logical operator nest, update the MCDC bitmap.
4593      if (CGF.MCDCLogOpStack.empty())
4594        CGF.maybeUpdateMCDCTestVectorBitmap(E);
4595
4596      // ZExt result to int or bool.
4597      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4598    }
4599
4600    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4601    if (!CGF.ContainsLabel(E->getRHS()))
4602      return llvm::Constant::getNullValue(ResTy);
4603  }
4604
4605  // If the top of the logical operator nest, reset the MCDC temp to 0.
4606  if (CGF.MCDCLogOpStack.empty())
4607    CGF.maybeResetMCDCCondBitmap(E);
4608
4609  CGF.MCDCLogOpStack.push_back(E);
4610
4611  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4612  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
4613
4614  CodeGenFunction::ConditionalEvaluation eval(CGF);
4615
4616  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
4617  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4618                           CGF.getProfileCount(E->getRHS()));
4619
4620  // Any edges into the ContBlock are now from an (indeterminate number of)
4621  // edges from this first condition.  All of these values will be false.  Start
4622  // setting up the PHI node in the Cont Block for this.
4623  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4624                                            "", ContBlock);
4625  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4626       PI != PE; ++PI)
4627    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4628
4629  eval.begin(CGF);
4630  CGF.EmitBlock(RHSBlock);
4631  CGF.incrementProfileCounter(E);
4632  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4633  eval.end(CGF);
4634
4635  // Reaquire the RHS block, as there may be subblocks inserted.
4636  RHSBlock = Builder.GetInsertBlock();
4637
4638  // If we're generating for profiling or coverage, generate a branch on the
4639  // RHS to a block that increments the RHS true counter needed to track branch
4640  // condition coverage.
4641  if (InstrumentRegions &&
4642      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4643    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4644    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4645    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4646    CGF.EmitBlock(RHSBlockCnt);
4647    CGF.incrementProfileCounter(E->getRHS());
4648    CGF.EmitBranch(ContBlock);
4649    PN->addIncoming(RHSCond, RHSBlockCnt);
4650  }
4651
4652  // Emit an unconditional branch from this block to ContBlock.
4653  {
4654    // There is no need to emit line number for unconditional branch.
4655    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4656    CGF.EmitBlock(ContBlock);
4657  }
4658  // Insert an entry into the phi node for the edge with the value of RHSCond.
4659  PN->addIncoming(RHSCond, RHSBlock);
4660
4661  CGF.MCDCLogOpStack.pop_back();
4662  // If the top of the logical operator nest, update the MCDC bitmap.
4663  if (CGF.MCDCLogOpStack.empty())
4664    CGF.maybeUpdateMCDCTestVectorBitmap(E);
4665
4666  // Artificial location to preserve the scope information
4667  {
4668    auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4669    PN->setDebugLoc(Builder.getCurrentDebugLocation());
4670  }
4671
4672  // ZExt result to int.
4673  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4674}
4675
4676Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4677  // Perform vector logical or on comparisons with zero vectors.
4678  if (E->getType()->isVectorType()) {
4679    CGF.incrementProfileCounter(E);
4680
4681    Value *LHS = Visit(E->getLHS());
4682    Value *RHS = Visit(E->getRHS());
4683    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4684    if (LHS->getType()->isFPOrFPVectorTy()) {
4685      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4686          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4687      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4688      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4689    } else {
4690      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4691      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4692    }
4693    Value *Or = Builder.CreateOr(LHS, RHS);
4694    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4695  }
4696
4697  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4698  llvm::Type *ResTy = ConvertType(E->getType());
4699
4700  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4701  // If we have 0 || X, just emit X without inserting the control flow.
4702  bool LHSCondVal;
4703  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4704    if (!LHSCondVal) { // If we have 0 || X, just emit X.
4705      CGF.incrementProfileCounter(E);
4706
4707      // If the top of the logical operator nest, reset the MCDC temp to 0.
4708      if (CGF.MCDCLogOpStack.empty())
4709        CGF.maybeResetMCDCCondBitmap(E);
4710
4711      CGF.MCDCLogOpStack.push_back(E);
4712
4713      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4714
4715      // If we're generating for profiling or coverage, generate a branch to a
4716      // block that increments the RHS counter need to track branch condition
4717      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4718      // "FalseBlock" after the increment is done.
4719      if (InstrumentRegions &&
4720          CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4721        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4722        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4723        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4724        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4725        CGF.EmitBlock(RHSBlockCnt);
4726        CGF.incrementProfileCounter(E->getRHS());
4727        CGF.EmitBranch(FBlock);
4728        CGF.EmitBlock(FBlock);
4729      }
4730
4731      CGF.MCDCLogOpStack.pop_back();
4732      // If the top of the logical operator nest, update the MCDC bitmap.
4733      if (CGF.MCDCLogOpStack.empty())
4734        CGF.maybeUpdateMCDCTestVectorBitmap(E);
4735
4736      // ZExt result to int or bool.
4737      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4738    }
4739
4740    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4741    if (!CGF.ContainsLabel(E->getRHS()))
4742      return llvm::ConstantInt::get(ResTy, 1);
4743  }
4744
4745  // If the top of the logical operator nest, reset the MCDC temp to 0.
4746  if (CGF.MCDCLogOpStack.empty())
4747    CGF.maybeResetMCDCCondBitmap(E);
4748
4749  CGF.MCDCLogOpStack.push_back(E);
4750
4751  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4752  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4753
4754  CodeGenFunction::ConditionalEvaluation eval(CGF);
4755
4756  // Branch on the LHS first.  If it is true, go to the success (cont) block.
4757  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4758                           CGF.getCurrentProfileCount() -
4759                               CGF.getProfileCount(E->getRHS()));
4760
4761  // Any edges into the ContBlock are now from an (indeterminate number of)
4762  // edges from this first condition.  All of these values will be true.  Start
4763  // setting up the PHI node in the Cont Block for this.
4764  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4765                                            "", ContBlock);
4766  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4767       PI != PE; ++PI)
4768    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4769
4770  eval.begin(CGF);
4771
4772  // Emit the RHS condition as a bool value.
4773  CGF.EmitBlock(RHSBlock);
4774  CGF.incrementProfileCounter(E);
4775  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4776
4777  eval.end(CGF);
4778
4779  // Reaquire the RHS block, as there may be subblocks inserted.
4780  RHSBlock = Builder.GetInsertBlock();
4781
4782  // If we're generating for profiling or coverage, generate a branch on the
4783  // RHS to a block that increments the RHS true counter needed to track branch
4784  // condition coverage.
4785  if (InstrumentRegions &&
4786      CodeGenFunction::isInstrumentedCondition(E->getRHS())) {
4787    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4788    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4789    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
4790    CGF.EmitBlock(RHSBlockCnt);
4791    CGF.incrementProfileCounter(E->getRHS());
4792    CGF.EmitBranch(ContBlock);
4793    PN->addIncoming(RHSCond, RHSBlockCnt);
4794  }
4795
4796  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
4797  // into the phi node for the edge with the value of RHSCond.
4798  CGF.EmitBlock(ContBlock);
4799  PN->addIncoming(RHSCond, RHSBlock);
4800
4801  CGF.MCDCLogOpStack.pop_back();
4802  // If the top of the logical operator nest, update the MCDC bitmap.
4803  if (CGF.MCDCLogOpStack.empty())
4804    CGF.maybeUpdateMCDCTestVectorBitmap(E);
4805
4806  // ZExt result to int.
4807  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4808}
4809
4810Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4811  CGF.EmitIgnoredExpr(E->getLHS());
4812  CGF.EnsureInsertPoint();
4813  return Visit(E->getRHS());
4814}
4815
4816//===----------------------------------------------------------------------===//
4817//                             Other Operators
4818//===----------------------------------------------------------------------===//
4819
4820/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4821/// expression is cheap enough and side-effect-free enough to evaluate
4822/// unconditionally instead of conditionally.  This is used to convert control
4823/// flow into selects in some cases.
4824static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4825                                                   CodeGenFunction &CGF) {
4826  // Anything that is an integer or floating point constant is fine.
4827  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4828
4829  // Even non-volatile automatic variables can't be evaluated unconditionally.
4830  // Referencing a thread_local may cause non-trivial initialization work to
4831  // occur. If we're inside a lambda and one of the variables is from the scope
4832  // outside the lambda, that function may have returned already. Reading its
4833  // locals is a bad idea. Also, these reads may introduce races there didn't
4834  // exist in the source-level program.
4835}
4836
4837
4838Value *ScalarExprEmitter::
4839VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4840  TestAndClearIgnoreResultAssign();
4841
4842  // Bind the common expression if necessary.
4843  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4844
4845  Expr *condExpr = E->getCond();
4846  Expr *lhsExpr = E->getTrueExpr();
4847  Expr *rhsExpr = E->getFalseExpr();
4848
4849  // If the condition constant folds and can be elided, try to avoid emitting
4850  // the condition and the dead arm.
4851  bool CondExprBool;
4852  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4853    Expr *live = lhsExpr, *dead = rhsExpr;
4854    if (!CondExprBool) std::swap(live, dead);
4855
4856    // If the dead side doesn't have labels we need, just emit the Live part.
4857    if (!CGF.ContainsLabel(dead)) {
4858      if (CondExprBool)
4859        CGF.incrementProfileCounter(E);
4860      Value *Result = Visit(live);
4861
4862      // If the live part is a throw expression, it acts like it has a void
4863      // type, so evaluating it returns a null Value*.  However, a conditional
4864      // with non-void type must return a non-null Value*.
4865      if (!Result && !E->getType()->isVoidType())
4866        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4867
4868      return Result;
4869    }
4870  }
4871
4872  // OpenCL: If the condition is a vector, we can treat this condition like
4873  // the select function.
4874  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
4875      condExpr->getType()->isExtVectorType()) {
4876    CGF.incrementProfileCounter(E);
4877
4878    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4879    llvm::Value *LHS = Visit(lhsExpr);
4880    llvm::Value *RHS = Visit(rhsExpr);
4881
4882    llvm::Type *condType = ConvertType(condExpr->getType());
4883    auto *vecTy = cast<llvm::FixedVectorType>(condType);
4884
4885    unsigned numElem = vecTy->getNumElements();
4886    llvm::Type *elemType = vecTy->getElementType();
4887
4888    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4889    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4890    llvm::Value *tmp = Builder.CreateSExt(
4891        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
4892    llvm::Value *tmp2 = Builder.CreateNot(tmp);
4893
4894    // Cast float to int to perform ANDs if necessary.
4895    llvm::Value *RHSTmp = RHS;
4896    llvm::Value *LHSTmp = LHS;
4897    bool wasCast = false;
4898    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4899    if (rhsVTy->getElementType()->isFloatingPointTy()) {
4900      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4901      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4902      wasCast = true;
4903    }
4904
4905    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4906    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4907    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4908    if (wasCast)
4909      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4910
4911    return tmp5;
4912  }
4913
4914  if (condExpr->getType()->isVectorType() ||
4915      condExpr->getType()->isSveVLSBuiltinType()) {
4916    CGF.incrementProfileCounter(E);
4917
4918    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4919    llvm::Value *LHS = Visit(lhsExpr);
4920    llvm::Value *RHS = Visit(rhsExpr);
4921
4922    llvm::Type *CondType = ConvertType(condExpr->getType());
4923    auto *VecTy = cast<llvm::VectorType>(CondType);
4924    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
4925
4926    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
4927    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
4928  }
4929
4930  // If this is a really simple expression (like x ? 4 : 5), emit this as a
4931  // select instead of as control flow.  We can only do this if it is cheap and
4932  // safe to evaluate the LHS and RHS unconditionally.
4933  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4934      isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4935    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4936    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4937
4938    CGF.incrementProfileCounter(E, StepV);
4939
4940    llvm::Value *LHS = Visit(lhsExpr);
4941    llvm::Value *RHS = Visit(rhsExpr);
4942    if (!LHS) {
4943      // If the conditional has void type, make sure we return a null Value*.
4944      assert(!RHS && "LHS and RHS types must match");
4945      return nullptr;
4946    }
4947    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4948  }
4949
4950  // If the top of the logical operator nest, reset the MCDC temp to 0.
4951  if (CGF.MCDCLogOpStack.empty())
4952    CGF.maybeResetMCDCCondBitmap(condExpr);
4953
4954  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4955  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4956  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4957
4958  CodeGenFunction::ConditionalEvaluation eval(CGF);
4959  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4960                           CGF.getProfileCount(lhsExpr));
4961
4962  CGF.EmitBlock(LHSBlock);
4963
4964  // If the top of the logical operator nest, update the MCDC bitmap for the
4965  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
4966  // may also contain a boolean expression.
4967  if (CGF.MCDCLogOpStack.empty())
4968    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
4969
4970  CGF.incrementProfileCounter(E);
4971  eval.begin(CGF);
4972  Value *LHS = Visit(lhsExpr);
4973  eval.end(CGF);
4974
4975  LHSBlock = Builder.GetInsertBlock();
4976  Builder.CreateBr(ContBlock);
4977
4978  CGF.EmitBlock(RHSBlock);
4979
4980  // If the top of the logical operator nest, update the MCDC bitmap for the
4981  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
4982  // may also contain a boolean expression.
4983  if (CGF.MCDCLogOpStack.empty())
4984    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
4985
4986  eval.begin(CGF);
4987  Value *RHS = Visit(rhsExpr);
4988  eval.end(CGF);
4989
4990  RHSBlock = Builder.GetInsertBlock();
4991  CGF.EmitBlock(ContBlock);
4992
4993  // If the LHS or RHS is a throw expression, it will be legitimately null.
4994  if (!LHS)
4995    return RHS;
4996  if (!RHS)
4997    return LHS;
4998
4999  // Create a PHI node for the real part.
5000  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5001  PN->addIncoming(LHS, LHSBlock);
5002  PN->addIncoming(RHS, RHSBlock);
5003
5004  return PN;
5005}
5006
5007Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5008  return Visit(E->getChosenSubExpr());
5009}
5010
5011Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5012  QualType Ty = VE->getType();
5013
5014  if (Ty->isVariablyModifiedType())
5015    CGF.EmitVariablyModifiedType(Ty);
5016
5017  Address ArgValue = Address::invalid();
5018  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5019
5020  llvm::Type *ArgTy = ConvertType(VE->getType());
5021
5022  // If EmitVAArg fails, emit an error.
5023  if (!ArgPtr.isValid()) {
5024    CGF.ErrorUnsupported(VE, "va_arg expression");
5025    return llvm::UndefValue::get(ArgTy);
5026  }
5027
5028  // FIXME Volatility.
5029  llvm::Value *Val = Builder.CreateLoad(ArgPtr);
5030
5031  // If EmitVAArg promoted the type, we must truncate it.
5032  if (ArgTy != Val->getType()) {
5033    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
5034      Val = Builder.CreateIntToPtr(Val, ArgTy);
5035    else
5036      Val = Builder.CreateTrunc(Val, ArgTy);
5037  }
5038
5039  return Val;
5040}
5041
5042Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5043  return CGF.EmitBlockLiteral(block);
5044}
5045
5046// Convert a vec3 to vec4, or vice versa.
5047static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
5048                                 Value *Src, unsigned NumElementsDst) {
5049  static constexpr int Mask[] = {0, 1, 2, -1};
5050  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5051}
5052
5053// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5054// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5055// but could be scalar or vectors of different lengths, and either can be
5056// pointer.
5057// There are 4 cases:
5058// 1. non-pointer -> non-pointer  : needs 1 bitcast
5059// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
5060// 3. pointer -> non-pointer
5061//   a) pointer -> intptr_t       : needs 1 ptrtoint
5062//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
5063// 4. non-pointer -> pointer
5064//   a) intptr_t -> pointer       : needs 1 inttoptr
5065//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
5066// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5067// allow casting directly between pointer types and non-integer non-pointer
5068// types.
5069static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
5070                                           const llvm::DataLayout &DL,
5071                                           Value *Src, llvm::Type *DstTy,
5072                                           StringRef Name = "") {
5073  auto SrcTy = Src->getType();
5074
5075  // Case 1.
5076  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5077    return Builder.CreateBitCast(Src, DstTy, Name);
5078
5079  // Case 2.
5080  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5081    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5082
5083  // Case 3.
5084  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5085    // Case 3b.
5086    if (!DstTy->isIntegerTy())
5087      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5088    // Cases 3a and 3b.
5089    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5090  }
5091
5092  // Case 4b.
5093  if (!SrcTy->isIntegerTy())
5094    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5095  // Cases 4a and 4b.
5096  return Builder.CreateIntToPtr(Src, DstTy, Name);
5097}
5098
5099Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5100  Value *Src  = CGF.EmitScalarExpr(E->getSrcExpr());
5101  llvm::Type *DstTy = ConvertType(E->getType());
5102
5103  llvm::Type *SrcTy = Src->getType();
5104  unsigned NumElementsSrc =
5105      isa<llvm::VectorType>(SrcTy)
5106          ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5107          : 0;
5108  unsigned NumElementsDst =
5109      isa<llvm::VectorType>(DstTy)
5110          ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5111          : 0;
5112
5113  // Use bit vector expansion for ext_vector_type boolean vectors.
5114  if (E->getType()->isExtVectorBoolType())
5115    return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5116
5117  // Going from vec3 to non-vec3 is a special case and requires a shuffle
5118  // vector to get a vec4, then a bitcast if the target type is different.
5119  if (NumElementsSrc == 3 && NumElementsDst != 3) {
5120    Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5121    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5122                                       DstTy);
5123
5124    Src->setName("astype");
5125    return Src;
5126  }
5127
5128  // Going from non-vec3 to vec3 is a special case and requires a bitcast
5129  // to vec4 if the original type is not vec4, then a shuffle vector to
5130  // get a vec3.
5131  if (NumElementsSrc != 3 && NumElementsDst == 3) {
5132    auto *Vec4Ty = llvm::FixedVectorType::get(
5133        cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5134    Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5135                                       Vec4Ty);
5136
5137    Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5138    Src->setName("astype");
5139    return Src;
5140  }
5141
5142  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5143                                      Src, DstTy, "astype");
5144}
5145
5146Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5147  return CGF.EmitAtomicExpr(E).getScalarVal();
5148}
5149
5150//===----------------------------------------------------------------------===//
5151//                         Entry Point into this File
5152//===----------------------------------------------------------------------===//
5153
5154/// Emit the computation of the specified expression of scalar type, ignoring
5155/// the result.
5156Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5157  assert(E && hasScalarEvaluationKind(E->getType()) &&
5158         "Invalid scalar expression to emit");
5159
5160  return ScalarExprEmitter(*this, IgnoreResultAssign)
5161      .Visit(const_cast<Expr *>(E));
5162}
5163
5164/// Emit a conversion from the specified type to the specified destination type,
5165/// both of which are LLVM scalar types.
5166Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
5167                                             QualType DstTy,
5168                                             SourceLocation Loc) {
5169  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5170         "Invalid scalar expression to emit");
5171  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5172}
5173
5174/// Emit a conversion from the specified complex type to the specified
5175/// destination type, where the destination type is an LLVM scalar type.
5176Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
5177                                                      QualType SrcTy,
5178                                                      QualType DstTy,
5179                                                      SourceLocation Loc) {
5180  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5181         "Invalid complex -> scalar conversion");
5182  return ScalarExprEmitter(*this)
5183      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5184}
5185
5186
5187Value *
5188CodeGenFunction::EmitPromotedScalarExpr(const Expr *E,
5189                                        QualType PromotionType) {
5190  if (!PromotionType.isNull())
5191    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5192  else
5193    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5194}
5195
5196
5197llvm::Value *CodeGenFunction::
5198EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
5199                        bool isInc, bool isPre) {
5200  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5201}
5202
5203LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
5204  // object->isa or (*object).isa
5205  // Generate code as for: *(Class*)object
5206
5207  Expr *BaseExpr = E->getBase();
5208  Address Addr = Address::invalid();
5209  if (BaseExpr->isPRValue()) {
5210    llvm::Type *BaseTy =
5211        ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
5212    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5213  } else {
5214    Addr = EmitLValue(BaseExpr).getAddress(*this);
5215  }
5216
5217  // Cast the address to Class*.
5218  Addr = Addr.withElementType(ConvertType(E->getType()));
5219  return MakeAddrLValue(Addr, E->getType());
5220}
5221
5222
5223LValue CodeGenFunction::EmitCompoundAssignmentLValue(
5224                                            const CompoundAssignOperator *E) {
5225  ScalarExprEmitter Scalar(*this);
5226  Value *Result = nullptr;
5227  switch (E->getOpcode()) {
5228#define COMPOUND_OP(Op)                                                       \
5229    case BO_##Op##Assign:                                                     \
5230      return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5231                                             Result)
5232  COMPOUND_OP(Mul);
5233  COMPOUND_OP(Div);
5234  COMPOUND_OP(Rem);
5235  COMPOUND_OP(Add);
5236  COMPOUND_OP(Sub);
5237  COMPOUND_OP(Shl);
5238  COMPOUND_OP(Shr);
5239  COMPOUND_OP(And);
5240  COMPOUND_OP(Xor);
5241  COMPOUND_OP(Or);
5242#undef COMPOUND_OP
5243
5244  case BO_PtrMemD:
5245  case BO_PtrMemI:
5246  case BO_Mul:
5247  case BO_Div:
5248  case BO_Rem:
5249  case BO_Add:
5250  case BO_Sub:
5251  case BO_Shl:
5252  case BO_Shr:
5253  case BO_LT:
5254  case BO_GT:
5255  case BO_LE:
5256  case BO_GE:
5257  case BO_EQ:
5258  case BO_NE:
5259  case BO_Cmp:
5260  case BO_And:
5261  case BO_Xor:
5262  case BO_Or:
5263  case BO_LAnd:
5264  case BO_LOr:
5265  case BO_Assign:
5266  case BO_Comma:
5267    llvm_unreachable("Not valid compound assignment operators");
5268  }
5269
5270  llvm_unreachable("Unhandled compound assignment operator");
5271}
5272
5273struct GEPOffsetAndOverflow {
5274  // The total (signed) byte offset for the GEP.
5275  llvm::Value *TotalOffset;
5276  // The offset overflow flag - true if the total offset overflows.
5277  llvm::Value *OffsetOverflows;
5278};
5279
5280/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
5281/// and compute the total offset it applies from it's base pointer BasePtr.
5282/// Returns offset in bytes and a boolean flag whether an overflow happened
5283/// during evaluation.
5284static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal,
5285                                                 llvm::LLVMContext &VMContext,
5286                                                 CodeGenModule &CGM,
5287                                                 CGBuilderTy &Builder) {
5288  const auto &DL = CGM.getDataLayout();
5289
5290  // The total (signed) byte offset for the GEP.
5291  llvm::Value *TotalOffset = nullptr;
5292
5293  // Was the GEP already reduced to a constant?
5294  if (isa<llvm::Constant>(GEPVal)) {
5295    // Compute the offset by casting both pointers to integers and subtracting:
5296    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
5297    Value *BasePtr_int =
5298        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
5299    Value *GEPVal_int =
5300        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
5301    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
5302    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
5303  }
5304
5305  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
5306  assert(GEP->getPointerOperand() == BasePtr &&
5307         "BasePtr must be the base of the GEP.");
5308  assert(GEP->isInBounds() && "Expected inbounds GEP");
5309
5310  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
5311
5312  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
5313  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5314  auto *SAddIntrinsic =
5315      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
5316  auto *SMulIntrinsic =
5317      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
5318
5319  // The offset overflow flag - true if the total offset overflows.
5320  llvm::Value *OffsetOverflows = Builder.getFalse();
5321
5322  /// Return the result of the given binary operation.
5323  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
5324                  llvm::Value *RHS) -> llvm::Value * {
5325    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
5326
5327    // If the operands are constants, return a constant result.
5328    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
5329      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
5330        llvm::APInt N;
5331        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
5332                                                  /*Signed=*/true, N);
5333        if (HasOverflow)
5334          OffsetOverflows = Builder.getTrue();
5335        return llvm::ConstantInt::get(VMContext, N);
5336      }
5337    }
5338
5339    // Otherwise, compute the result with checked arithmetic.
5340    auto *ResultAndOverflow = Builder.CreateCall(
5341        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
5342    OffsetOverflows = Builder.CreateOr(
5343        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
5344    return Builder.CreateExtractValue(ResultAndOverflow, 0);
5345  };
5346
5347  // Determine the total byte offset by looking at each GEP operand.
5348  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
5349       GTI != GTE; ++GTI) {
5350    llvm::Value *LocalOffset;
5351    auto *Index = GTI.getOperand();
5352    // Compute the local offset contributed by this indexing step:
5353    if (auto *STy = GTI.getStructTypeOrNull()) {
5354      // For struct indexing, the local offset is the byte position of the
5355      // specified field.
5356      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
5357      LocalOffset = llvm::ConstantInt::get(
5358          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
5359    } else {
5360      // Otherwise this is array-like indexing. The local offset is the index
5361      // multiplied by the element size.
5362      auto *ElementSize =
5363          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
5364      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
5365      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
5366    }
5367
5368    // If this is the first offset, set it as the total offset. Otherwise, add
5369    // the local offset into the running total.
5370    if (!TotalOffset || TotalOffset == Zero)
5371      TotalOffset = LocalOffset;
5372    else
5373      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5374  }
5375
5376  return {TotalOffset, OffsetOverflows};
5377}
5378
5379Value *
5380CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
5381                                        ArrayRef<Value *> IdxList,
5382                                        bool SignedIndices, bool IsSubtraction,
5383                                        SourceLocation Loc, const Twine &Name) {
5384  llvm::Type *PtrTy = Ptr->getType();
5385  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
5386
5387  // If the pointer overflow sanitizer isn't enabled, do nothing.
5388  if (!SanOpts.has(SanitizerKind::PointerOverflow))
5389    return GEPVal;
5390
5391  // Perform nullptr-and-offset check unless the nullptr is defined.
5392  bool PerformNullCheck = !NullPointerIsDefined(
5393      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5394  // Check for overflows unless the GEP got constant-folded,
5395  // and only in the default address space
5396  bool PerformOverflowCheck =
5397      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5398
5399  if (!(PerformNullCheck || PerformOverflowCheck))
5400    return GEPVal;
5401
5402  const auto &DL = CGM.getDataLayout();
5403
5404  SanitizerScope SanScope(this);
5405  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5406
5407  GEPOffsetAndOverflow EvaluatedGEP =
5408      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
5409
5410  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5411          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5412         "If the offset got constant-folded, we don't expect that there was an "
5413         "overflow.");
5414
5415  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5416
5417  // Common case: if the total offset is zero, and we are using C++ semantics,
5418  // where nullptr+0 is defined, don't emit a check.
5419  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5420    return GEPVal;
5421
5422  // Now that we've computed the total offset, add it to the base pointer (with
5423  // wrapping semantics).
5424  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5425  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5426
5427  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
5428
5429  if (PerformNullCheck) {
5430    // In C++, if the base pointer evaluates to a null pointer value,
5431    // the only valid  pointer this inbounds GEP can produce is also
5432    // a null pointer, so the offset must also evaluate to zero.
5433    // Likewise, if we have non-zero base pointer, we can not get null pointer
5434    // as a result, so the offset can not be -intptr_t(BasePtr).
5435    // In other words, both pointers are either null, or both are non-null,
5436    // or the behaviour is undefined.
5437    //
5438    // C, however, is more strict in this regard, and gives more
5439    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5440    // So both the input to the 'gep inbounds' AND the output must not be null.
5441    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5442    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5443    auto *Valid =
5444        CGM.getLangOpts().CPlusPlus
5445            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5446            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5447    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5448  }
5449
5450  if (PerformOverflowCheck) {
5451    // The GEP is valid if:
5452    // 1) The total offset doesn't overflow, and
5453    // 2) The sign of the difference between the computed address and the base
5454    // pointer matches the sign of the total offset.
5455    llvm::Value *ValidGEP;
5456    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5457    if (SignedIndices) {
5458      // GEP is computed as `unsigned base + signed offset`, therefore:
5459      // * If offset was positive, then the computed pointer can not be
5460      //   [unsigned] less than the base pointer, unless it overflowed.
5461      // * If offset was negative, then the computed pointer can not be
5462      //   [unsigned] greater than the bas pointere, unless it overflowed.
5463      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5464      auto *PosOrZeroOffset =
5465          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5466      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5467      ValidGEP =
5468          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5469    } else if (!IsSubtraction) {
5470      // GEP is computed as `unsigned base + unsigned offset`,  therefore the
5471      // computed pointer can not be [unsigned] less than base pointer,
5472      // unless there was an overflow.
5473      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5474      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5475    } else {
5476      // GEP is computed as `unsigned base - unsigned offset`, therefore the
5477      // computed pointer can not be [unsigned] greater than base pointer,
5478      // unless there was an overflow.
5479      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5480      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5481    }
5482    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5483    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5484  }
5485
5486  assert(!Checks.empty() && "Should have produced some checks.");
5487
5488  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5489  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5490  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5491  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5492
5493  return GEPVal;
5494}
5495