//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements extra semantic analysis beyond what is enforced
// by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/FormatString.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;
using namespace sema;

/// Map a byte offset within a string literal back to a source location,
/// delegating to StringLiteral::getLocationOfByte with this Sema's source
/// manager, language options, and target info.
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Pack two format-argument-passing kinds into a single value: \p A occupies
/// the high byte and \p B the low byte.
static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
                                            Sema::FormatArgumentPassingKind B) {
  return (A << 8) | B;
}

/// Checks that a call expression's argument count is at least the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
                                 unsigned MinArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount >= MinArgCount)
    return false;

  return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
         << 0 /*function call*/ << MinArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is at most the desired
/// number. This is useful when doing custom type-checking on a variadic
/// function. Returns true on error.
static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
  unsigned ArgCount = Call->getNumArgs();
  if (ArgCount <= MaxArgCount)
    return false;
  return S.Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
         << 0 /*function call*/ << MaxArgCount << ArgCount
         << Call->getSourceRange();
}

/// Checks that a call expression's argument count is in the desired range. This
/// is useful when doing custom type-checking on a variadic function. Returns
/// true on error.
static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
                               unsigned MaxArgCount) {
  return checkArgCountAtLeast(S, Call, MinArgCount) ||
         checkArgCountAtMost(S, Call, MaxArgCount);
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
154static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) { 155 unsigned ArgCount = Call->getNumArgs(); 156 if (ArgCount == DesiredArgCount) 157 return false; 158 159 if (checkArgCountAtLeast(S, Call, DesiredArgCount)) 160 return true; 161 assert(ArgCount > DesiredArgCount && "should have diagnosed this"); 162 163 // Highlight all the excess arguments. 164 SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(), 165 Call->getArg(ArgCount - 1)->getEndLoc()); 166 167 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args) 168 << 0 /*function call*/ << DesiredArgCount << ArgCount 169 << Call->getArg(1)->getSourceRange(); 170} 171 172static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) { 173 if (Value->isTypeDependent()) 174 return false; 175 176 InitializedEntity Entity = 177 InitializedEntity::InitializeParameter(S.Context, Ty, false); 178 ExprResult Result = 179 S.PerformCopyInitialization(Entity, SourceLocation(), Value); 180 if (Result.isInvalid()) 181 return true; 182 Value = Result.get(); 183 return false; 184} 185 186/// Check that the first argument to __builtin_annotation is an integer 187/// and the second argument is a non-wide string literal. 188static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) { 189 if (checkArgCount(S, TheCall, 2)) 190 return true; 191 192 // First argument should be an integer. 193 Expr *ValArg = TheCall->getArg(0); 194 QualType Ty = ValArg->getType(); 195 if (!Ty->isIntegerType()) { 196 S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg) 197 << ValArg->getSourceRange(); 198 return true; 199 } 200 201 // Second argument should be a constant string. 
202 Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts(); 203 StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg); 204 if (!Literal || !Literal->isOrdinary()) { 205 S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg) 206 << StrArg->getSourceRange(); 207 return true; 208 } 209 210 TheCall->setType(Ty); 211 return false; 212} 213 214static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) { 215 // We need at least one argument. 216 if (TheCall->getNumArgs() < 1) { 217 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 218 << 0 << 1 << TheCall->getNumArgs() 219 << TheCall->getCallee()->getSourceRange(); 220 return true; 221 } 222 223 // All arguments should be wide string literals. 224 for (Expr *Arg : TheCall->arguments()) { 225 auto *Literal = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts()); 226 if (!Literal || !Literal->isWide()) { 227 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str) 228 << Arg->getSourceRange(); 229 return true; 230 } 231 } 232 233 return false; 234} 235 236/// Check that the argument to __builtin_addressof is a glvalue, and set the 237/// result type to the corresponding pointer type. 238static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) { 239 if (checkArgCount(S, TheCall, 1)) 240 return true; 241 242 ExprResult Arg(TheCall->getArg(0)); 243 QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc()); 244 if (ResultType.isNull()) 245 return true; 246 247 TheCall->setArg(0, Arg.get()); 248 TheCall->setType(ResultType); 249 return false; 250} 251 252/// Check that the argument to __builtin_function_start is a function. 
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  // The argument must be a constant reference to a function declaration.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}

/// Check the number of arguments and set the result type to
/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}

/// Check that the value argument for __builtin_is_aligned(value, alignment) and
/// __builtin_aligned_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  clang::Expr *Source = TheCall->getArg(0);
  bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;

  // Enums and bool are excluded even though they are integer types.
  auto IsValidIntegerType = [](QualType Ty) {
    return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
  };
  QualType SrcTy = Source->getType();
  // We should also be able to use it with arrays (but not functions!).
  if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
    SrcTy = S.Context.getDecayedType(SrcTy);
  }
  if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
      SrcTy->isFunctionPointerType()) {
    // FIXME: this is not quite the right error message since we don't allow
    // floating point types, or member pointers.
    S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
        << SrcTy;
    return true;
  }

  clang::Expr *AlignOp = TheCall->getArg(1);
  if (!IsValidIntegerType(AlignOp->getType())) {
    S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
        << AlignOp->getType();
    return true;
  }
  Expr::EvalResult AlignResult;
  unsigned MaxAlignmentBits = S.Context.getIntWidth(SrcTy) - 1;
  // We can't check validity of alignment if it is value dependent.
  if (!AlignOp->isValueDependent() &&
      AlignOp->EvaluateAsInt(AlignResult, S.Context,
                             Expr::SE_AllowSideEffects)) {
    llvm::APSInt AlignValue = AlignResult.Val.getInt();
    // Largest representable power of two for the (possibly decayed) source
    // type: a single bit set at position MaxAlignmentBits.
    llvm::APSInt MaxValue(
        llvm::APInt::getOneBitSet(MaxAlignmentBits + 1, MaxAlignmentBits));
    if (AlignValue < 1) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
      return true;
    }
    if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
          << toString(MaxValue, 10);
      return true;
    }
    if (!AlignValue.isPowerOf2()) {
      S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
      return true;
    }
    if (AlignValue == 1) {
      // Aligning to 1 is a no-op; warn but do not error.
      S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
          << IsBooleanAlignBuiltin;
    }
  }

  ExprResult SrcArg = S.PerformCopyInitialization(
      InitializedEntity::InitializeParameter(S.Context, SrcTy, false),
      SourceLocation(), Source);
  if (SrcArg.isInvalid())
    return true;
  TheCall->setArg(0, SrcArg.get());
  ExprResult AlignArg =
      S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
                                      S.Context, AlignOp->getType(), false),
                                  SourceLocation(), AlignOp);
  if (AlignArg.isInvalid())
    return true;
  TheCall->setArg(1, AlignArg.get());
  // For align_up/align_down, the return type is the same as the (potentially
  // decayed) argument type including qualifiers. For is_aligned(), the result
  // is always bool.
  TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
  return false;
}

/// Check the three arguments of an overflow builtin: the first two must be
/// integers and the third a pointer to a non-const integer that receives the
/// result. Returns true on error.
static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
                                unsigned BuiltinID) {
  if (checkArgCount(S, TheCall, 3))
    return true;

  // First two arguments should be integers.
  for (unsigned I = 0; I < 2; ++I) {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(I, Arg.get());

    QualType Ty = Arg.get()->getType();
    if (!Ty->isIntegerType()) {
      S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Third argument should be a pointer to a non-const integer.
  // IRGen correctly handles volatile, restrict, and address spaces, and
  // the other qualifiers aren't possible.
  {
    ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(2, Arg.get());

    QualType Ty = Arg.get()->getType();
    const auto *PtrTy = Ty->getAs<PointerType>();
    if (!PtrTy ||
        !PtrTy->getPointeeType()->isIntegerType() ||
        PtrTy->getPointeeType().isConstQualified()) {
      S.Diag(Arg.get()->getBeginLoc(),
             diag::err_overflow_builtin_must_be_ptr_int)
          << Ty << Arg.get()->getSourceRange();
      return true;
    }
  }

  // Disallow signed bit-precise integer args larger than 128 bits to mul
  // function until we improve backend support.
  if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
    for (unsigned I = 0; I < 3; ++I) {
      const auto Arg = TheCall->getArg(I);
      // Third argument will be a pointer.
      auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
      if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
          S.getASTContext().getIntWidth(Ty) > 128)
        return S.Diag(Arg->getBeginLoc(),
                      diag::err_overflow_builtin_bit_int_max_size)
               << 128;
    }
  }

  return false;
}

namespace {
/// Helper that synthesizes the sequence of print-function calls performed by
/// __builtin_dump_struct: each generated call is collected in Actions and
/// later bundled into a PseudoObjectExpr by buildWrapper().
struct BuiltinDumpStructGenerator {
  Sema &S;
  CallExpr *TheCall;
  SourceLocation Loc = TheCall->getBeginLoc();
  // All synthesized expressions, in evaluation order.
  SmallVector<Expr *, 32> Actions;
  // Tracks whether any diagnostic was emitted while building calls.
  DiagnosticErrorTrap ErrorTracker;
  PrintingPolicy Policy;

  BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
      : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
        Policy(S.Context.getPrintingPolicy()) {
    Policy.AnonymousTagLocations = false;
  }

  // Wrap Inner in an OpaqueValueExpr and record it as an action so it can be
  // referenced multiple times without re-evaluation.
  Expr *makeOpaqueValueExpr(Expr *Inner) {
    auto *OVE = new (S.Context)
        OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
                        Inner->getObjectKind(), Inner);
    Actions.push_back(OVE);
    return OVE;
  }

  Expr *getStringLiteral(llvm::StringRef Str) {
    Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
    // Wrap the literal in parentheses to attach a source location.
    return new (S.Context) ParenExpr(Loc, Loc, Lit);
  }

  // Build a call to the user-supplied print function (argument 1 of the
  // builtin) passing any extra builtin arguments, the format string, and
  // Exprs. Returns true on error.
  bool callPrintFunction(llvm::StringRef Format,
                         llvm::ArrayRef<Expr *> Exprs = {}) {
    SmallVector<Expr *, 8> Args;
    assert(TheCall->getNumArgs() >= 2);
    Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
    Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
    Args.push_back(getStringLiteral(Format));
    Args.insert(Args.end(), Exprs.begin(), Exprs.end());

    // Register a note to explain why we're performing the call.
    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
    Ctx.PointOfInstantiation = Loc;
    Ctx.CallArgs = Args.data();
    Ctx.NumCallArgs = Args.size();
    S.pushCodeSynthesisContext(Ctx);

    ExprResult RealCall =
        S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
                        TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());

    S.popCodeSynthesisContext();
    if (!RealCall.isInvalid())
      Actions.push_back(RealCall.get());
    // Bail out if we've hit any errors, even if we managed to build the
    // call. We don't want to produce more than one error.
    return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
  }

  // Returns a string of spaces for the given nesting depth, or null at depth
  // zero (no indent string needed).
  Expr *getIndentString(unsigned Depth) {
    if (!Depth)
      return nullptr;

    llvm::SmallString<32> Indent;
    Indent.resize(Depth * Policy.Indentation, ' ');
    return getStringLiteral(Indent);
  }

  Expr *getTypeString(QualType T) {
    return getStringLiteral(T.getAsString(Policy));
  }

  // Append a printf conversion specifier suitable for a value of type T to
  // Str. Returns false if no suitable specifier could be determined.
  bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
    llvm::raw_svector_ostream OS(Str);

    // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
    // than trying to print a single character.
    if (auto *BT = T->getAs<BuiltinType>()) {
      switch (BT->getKind()) {
      case BuiltinType::Bool:
        OS << "%d";
        return true;
      case BuiltinType::Char_U:
      case BuiltinType::UChar:
        OS << "%hhu";
        return true;
      case BuiltinType::Char_S:
      case BuiltinType::SChar:
        OS << "%hhd";
        return true;
      default:
        break;
      }
    }

    analyze_printf::PrintfSpecifier Specifier;
    if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
      // We were able to guess how to format this.
      if (Specifier.getConversionSpecifier().getKind() ==
          analyze_printf::PrintfConversionSpecifier::sArg) {
        // Wrap double-quotes around a '%s' specifier and limit its maximum
        // length. Ideally we'd also somehow escape special characters in the
        // contents but printf doesn't support that.
        // FIXME: '%s' formatting is not safe in general.
        OS << '"';
        Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
        Specifier.toString(OS);
        OS << '"';
        // FIXME: It would be nice to include a '...' if the string doesn't fit
        // in the length limit.
      } else {
        Specifier.toString(OS);
      }
      return true;
    }

    if (T->isPointerType()) {
      // Format all pointers with '%p'.
      OS << "%p";
      return true;
    }

    return false;
  }

  // Print the record's type name (with indentation) and then its value.
  bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
    Expr *IndentLit = getIndentString(Depth);
    Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
    if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
                  : callPrintFunction("%s", {TypeLit}))
      return true;

    return dumpRecordValue(RD, E, IndentLit, Depth);
  }

  // Dump a record value. E should be a pointer or lvalue referring to an RD.
  bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
                       unsigned Depth) {
    // FIXME: Decide what to do if RD is a union. At least we should probably
    // turn off printing `const char*` members with `%s`, because that is very
    // likely to crash if that's not the active member. Whatever we decide, we
    // should document it.

    // Build an OpaqueValueExpr so we can refer to E more than once without
    // triggering re-evaluation.
    Expr *RecordArg = makeOpaqueValueExpr(E);
    bool RecordArgIsPtr = RecordArg->getType()->isPointerType();

    if (callPrintFunction(" {\n"))
      return true;

    // Dump each base class, regardless of whether they're aggregates.
    if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &Base : CXXRD->bases()) {
        QualType BaseType =
            RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
                           : S.Context.getLValueReferenceType(Base.getType());
        ExprResult BasePtr = S.BuildCStyleCastExpr(
            Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
            RecordArg);
        if (BasePtr.isInvalid() ||
            dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
                              Depth + 1))
          return true;
      }
    }

    Expr *FieldIndentArg = getIndentString(Depth + 1);

    // Dump each field.
    for (auto *D : RD->decls()) {
      auto *IFD = dyn_cast<IndirectFieldDecl>(D);
      auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
      if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
        continue;

      // Line layout: indent, type, name, then (optionally) bit-width, '=',
      // and the value.
      llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
      llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
                                           getTypeString(FD->getType()),
                                           getStringLiteral(FD->getName())};

      if (FD->isBitField()) {
        Format += ": %zu ";
        QualType SizeT = S.Context.getSizeType();
        llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
                             FD->getBitWidthValue(S.Context));
        Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
      }

      Format += "=";

      ExprResult Field =
          IFD ? S.BuildAnonymousStructUnionMemberReference(
                    CXXScopeSpec(), Loc, IFD,
                    DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
              : S.BuildFieldReferenceExpr(
                    RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
                    DeclAccessPair::make(FD, AS_public),
                    DeclarationNameInfo(FD->getDeclName(), Loc));
      if (Field.isInvalid())
        return true;

      auto *InnerRD = FD->getType()->getAsRecordDecl();
      auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
      if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
        // Recursively print the values of members of aggregate record type.
        if (callPrintFunction(Format, Args) ||
            dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
          return true;
      } else {
        Format += " ";
        if (appendFormatSpecifier(FD->getType(), Format)) {
          // We know how to print this field.
          Args.push_back(Field.get());
        } else {
          // We don't know how to print this field. Print out its address
          // with a format specifier that a smart tool will be able to
          // recognize and treat specially.
          Format += "*%p";
          ExprResult FieldAddr =
              S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
          if (FieldAddr.isInvalid())
            return true;
          Args.push_back(FieldAddr.get());
        }
        Format += "\n";
        if (callPrintFunction(Format, Args))
          return true;
      }
    }

    return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
                        : callPrintFunction("}\n");
  }

  // Bundle all recorded actions into a PseudoObjectExpr keyed on the original
  // call, and propagate its type/value kind onto the original call.
  Expr *buildWrapper() {
    auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
                                             PseudoObjectExpr::NoResult);
    TheCall->setType(Wrapper->getType());
    TheCall->setValueKind(Wrapper->getValueKind());
    return Wrapper;
  }
};
} // namespace

/// Type-check a __builtin_dump_struct call: the first argument must be a
/// pointer to a record, the second a callable; the builtin is lowered to a
/// sequence of calls to that callable via BuiltinDumpStructGenerator.
static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
  if (checkArgCountAtLeast(S, TheCall, 2))
    return ExprError();

  ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
  if (PtrArgResult.isInvalid())
    return ExprError();
  TheCall->setArg(0, PtrArgResult.get());

  // First argument should be a pointer to a struct.
  QualType PtrArgType = PtrArgResult.get()->getType();
  if (!PtrArgType->isPointerType() ||
      !PtrArgType->getPointeeType()->isRecordType()) {
    S.Diag(PtrArgResult.get()->getBeginLoc(),
           diag::err_expected_struct_pointer_argument)
        << 1 << TheCall->getDirectCallee() << PtrArgType;
    return ExprError();
  }
  const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();

  // Second argument is a callable, but we can't fully validate it until we try
  // calling it.
  QualType FnArgType = TheCall->getArg(1)->getType();
  if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
      !FnArgType->isBlockPointerType() &&
      !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
    auto *BT = FnArgType->getAs<BuiltinType>();
    switch (BT ? BT->getKind() : BuiltinType::Void) {
    case BuiltinType::Dependent:
    case BuiltinType::Overload:
    case BuiltinType::BoundMember:
    case BuiltinType::PseudoObject:
    case BuiltinType::UnknownAny:
    case BuiltinType::BuiltinFn:
      // This might be a callable.
      break;

    default:
      S.Diag(TheCall->getArg(1)->getBeginLoc(),
             diag::err_expected_callable_argument)
          << 2 << TheCall->getDirectCallee() << FnArgType;
      return ExprError();
    }
  }

  BuiltinDumpStructGenerator Generator(S, TheCall);

  // Wrap parentheses around the given pointer. This is not necessary for
  // correct code generation, but it means that when we pretty-print the call
  // arguments in our diagnostics we will produce '(&s)->n' instead of the
  // incorrect '&s->n'.
  Expr *PtrArg = PtrArgResult.get();
  PtrArg = new (S.Context)
      ParenExpr(PtrArg->getBeginLoc(),
                S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
  if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
    return ExprError();

  return Generator.buildWrapper();
}

/// Check __builtin_call_with_static_chain(call, chain): the first argument
/// must be a plain (non-block, non-builtin, non-pseudo-destructor) call and
/// the second a pointer. On success the builtin call takes on the type and
/// value kind of the wrapped call. Returns true on error.
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  // Give the builtin a function-pointer type matching (return-type, chain
  // pointer) so the callee expression type-checks.
  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo());
  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);

  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

namespace {

/// Walks a scanf format string and diagnoses destination buffers that are
/// smaller than the constant field width (plus null terminator, for %s and
/// scanlists) requested by the format.
class ScanfDiagnosticFormatHandler
    : public analyze_format_string::FormatStringHandler {
  // Accepts the argument index (relative to the first destination index) of the
  // argument whose size we want.
  using ComputeSizeFunction =
      llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;

  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
  using DiagnoseFunction =
      llvm::function_ref<void(unsigned, unsigned, unsigned)>;

  ComputeSizeFunction ComputeSizeArgument;
  DiagnoseFunction Diagnose;

public:
  ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
                               DiagnoseFunction Diagnose)
      : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}

  bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
                            const char *StartSpecifier,
                            unsigned specifierLen) override {
    if (!FS.consumesDataArgument())
      return true;

    // %s and scanlists write a trailing null byte; %c does not.
    unsigned NulByte = 0;
    switch ((FS.getConversionSpecifier().getKind())) {
    default:
      return true;
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::ScanListArg:
      NulByte = 1;
      break;
    case analyze_format_string::ConversionSpecifier::cArg:
      break;
    }

    // Only constant field widths can be checked statically.
    analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
    if (FW.getHowSpecified() !=
        analyze_format_string::OptionalAmount::HowSpecified::Constant)
      return true;

    unsigned SourceSize = FW.getConstantAmount() + NulByte;

    std::optional<llvm::APSInt> DestSizeAPS =
        ComputeSizeArgument(FS.getArgIndex());
    if (!DestSizeAPS)
      return true;

    unsigned DestSize = DestSizeAPS->getZExtValue();

    if (DestSize < SourceSize)
      Diagnose(FS.getArgIndex(), DestSize, SourceSize);

    return true;
  }
};

/// Walks a printf format string and accumulates a lower bound on the number
/// of bytes the formatted output will occupy (including the terminating null
/// byte always written by sprintf).
class EstimateSizeFormatHandler
    : public analyze_format_string::FormatStringHandler {
  size_t Size;

public:
  EstimateSizeFormatHandler(StringRef Format)
      : Size(std::min(Format.find(0), Format.size()) +
             1 /* null byte always written by sprintf */) {}

  bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                             const char *, unsigned SpecifierLen,
                             const TargetInfo &) override {

    const size_t FieldWidth = computeFieldWidth(FS);
    const size_t Precision = computePrecision(FS);

    // The actual format.
    switch (FS.getConversionSpecifier().getKind()) {
    // Just a char.
    case analyze_format_string::ConversionSpecifier::cArg:
    case analyze_format_string::ConversionSpecifier::CArg:
      Size += std::max(FieldWidth, (size_t)1);
      break;
    // Just an integer.
    case analyze_format_string::ConversionSpecifier::dArg:
    case analyze_format_string::ConversionSpecifier::DArg:
    case analyze_format_string::ConversionSpecifier::iArg:
    case analyze_format_string::ConversionSpecifier::oArg:
    case analyze_format_string::ConversionSpecifier::OArg:
    case analyze_format_string::ConversionSpecifier::uArg:
    case analyze_format_string::ConversionSpecifier::UArg:
    case analyze_format_string::ConversionSpecifier::xArg:
    case analyze_format_string::ConversionSpecifier::XArg:
      Size += std::max(FieldWidth, Precision);
      break;

    // %g style conversion switches between %f or %e style dynamically.
    // %f always takes less space, so default to it.
    case analyze_format_string::ConversionSpecifier::gArg:
    case analyze_format_string::ConversionSpecifier::GArg:

    // Floating point number in the form '[+]ddd.ddd'.
    case analyze_format_string::ConversionSpecifier::fArg:
    case analyze_format_string::ConversionSpecifier::FArg:
      Size += std::max(FieldWidth, 1 /* integer part */ +
                                       (Precision ? 1 + Precision
                                                  : 0) /* period + decimal */);
      break;

    // Floating point number in the form '[-]d.ddde[+-]dd'.
    case analyze_format_string::ConversionSpecifier::eArg:
    case analyze_format_string::ConversionSpecifier::EArg:
      Size +=
          std::max(FieldWidth,
                   1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* e or E letter */ + 2 /* exponent */);
      break;

    // Floating point number in the form '[-]0xh.hhhhp±dd'.
    case analyze_format_string::ConversionSpecifier::aArg:
    case analyze_format_string::ConversionSpecifier::AArg:
      Size +=
          std::max(FieldWidth,
                   2 /* 0x */ + 1 /* integer part */ +
                       (Precision ? 1 + Precision : 0) /* period + decimal */ +
                       1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
      break;

    // Just a string.
    case analyze_format_string::ConversionSpecifier::sArg:
    case analyze_format_string::ConversionSpecifier::SArg:
      Size += FieldWidth;
      break;

    // Just a pointer in the form '0xddd'.
    case analyze_format_string::ConversionSpecifier::pArg:
      Size += std::max(FieldWidth, 2 /* leading 0x */ + Precision);
      break;

    // A plain percent.
    case analyze_format_string::ConversionSpecifier::PercentArg:
      Size += 1;
      break;

    default:
      break;
    }

    // '+' or ' ' sign prefix adds one byte.
    Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();

    if (FS.hasAlternativeForm()) {
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      // Force a leading '0'.
      case analyze_format_string::ConversionSpecifier::oArg:
        Size += 1;
        break;
      // Force a leading '0x'.
      case analyze_format_string::ConversionSpecifier::xArg:
      case analyze_format_string::ConversionSpecifier::XArg:
        Size += 2;
        break;
      // Force a period '.' before decimal, even if precision is 0.
      case analyze_format_string::ConversionSpecifier::aArg:
      case analyze_format_string::ConversionSpecifier::AArg:
      case analyze_format_string::ConversionSpecifier::eArg:
      case analyze_format_string::ConversionSpecifier::EArg:
      case analyze_format_string::ConversionSpecifier::fArg:
      case analyze_format_string::ConversionSpecifier::FArg:
      case analyze_format_string::ConversionSpecifier::gArg:
      case analyze_format_string::ConversionSpecifier::GArg:
        Size += (Precision ? 0 : 1);
        break;
      }
    }
    // The specifier text itself was counted into Size by the constructor's
    // whole-string length; subtract it now that its expansion was added.
    assert(SpecifierLen <= Size && "no underflow");
    Size -= SpecifierLen;
    return true;
  }

  size_t getSizeLowerBound() const { return Size; }

private:
  static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
    size_t FieldWidth = 0;
    if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
      FieldWidth = FW.getConstantAmount();
    return FieldWidth;
  }

  static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
    const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
    size_t Precision = 0;

    // See man 3 printf for default precision value based on the specifier.
    switch (FW.getHowSpecified()) {
    case analyze_format_string::OptionalAmount::NotSpecified:
      switch (FS.getConversionSpecifier().getKind()) {
      default:
        break;
      case analyze_format_string::ConversionSpecifier::dArg: // %d
      case analyze_format_string::ConversionSpecifier::DArg: // %D
      case analyze_format_string::ConversionSpecifier::iArg: // %i
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
        Precision = 1;
        break;
      case analyze_format_string::ConversionSpecifier::fArg: // %f
      case analyze_format_string::ConversionSpecifier::FArg: // %F
      case analyze_format_string::ConversionSpecifier::eArg: // %e
      case analyze_format_string::ConversionSpecifier::EArg: // %E
      case analyze_format_string::ConversionSpecifier::gArg: // %g
      case analyze_format_string::ConversionSpecifier::GArg: // %G
        Precision = 6;
        break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
        Precision = 1;
        break;
      }
      break;
    case analyze_format_string::OptionalAmount::Constant:
      Precision = FW.getConstantAmount();
      break;
    default:
      break;
    }
    return Precision;
  }
};

} // namespace

void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
                                               CallExpr *TheCall) {
  if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
      isConstantEvaluated())
    return;

  bool UseDABAttr = false;
  const FunctionDecl *UseDecl = FD;

  const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
  if (DABAttr) {
    UseDecl = DABAttr->getFunction();
assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!"); 1046 UseDABAttr = true; 1047 } 1048 1049 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/true); 1050 1051 if (!BuiltinID) 1052 return; 1053 1054 const TargetInfo &TI = getASTContext().getTargetInfo(); 1055 unsigned SizeTypeWidth = TI.getTypeWidth(TI.getSizeType()); 1056 1057 auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> { 1058 // If we refer to a diagnose_as_builtin attribute, we need to change the 1059 // argument index to refer to the arguments of the called function. Unless 1060 // the index is out of bounds, which presumably means it's a variadic 1061 // function. 1062 if (!UseDABAttr) 1063 return Index; 1064 unsigned DABIndices = DABAttr->argIndices_size(); 1065 unsigned NewIndex = Index < DABIndices 1066 ? DABAttr->argIndices_begin()[Index] 1067 : Index - DABIndices + FD->getNumParams(); 1068 if (NewIndex >= TheCall->getNumArgs()) 1069 return std::nullopt; 1070 return NewIndex; 1071 }; 1072 1073 auto ComputeExplicitObjectSizeArgument = 1074 [&](unsigned Index) -> std::optional<llvm::APSInt> { 1075 std::optional<unsigned> IndexOptional = TranslateIndex(Index); 1076 if (!IndexOptional) 1077 return std::nullopt; 1078 unsigned NewIndex = *IndexOptional; 1079 Expr::EvalResult Result; 1080 Expr *SizeArg = TheCall->getArg(NewIndex); 1081 if (!SizeArg->EvaluateAsInt(Result, getASTContext())) 1082 return std::nullopt; 1083 llvm::APSInt Integer = Result.Val.getInt(); 1084 Integer.setIsUnsigned(true); 1085 return Integer; 1086 }; 1087 1088 auto ComputeSizeArgument = 1089 [&](unsigned Index) -> std::optional<llvm::APSInt> { 1090 // If the parameter has a pass_object_size attribute, then we should use its 1091 // (potentially) more strict checking mode. Otherwise, conservatively assume 1092 // type 0. 1093 int BOSType = 0; 1094 // This check can fail for variadic functions. 
1095 if (Index < FD->getNumParams()) { 1096 if (const auto *POS = 1097 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>()) 1098 BOSType = POS->getType(); 1099 } 1100 1101 std::optional<unsigned> IndexOptional = TranslateIndex(Index); 1102 if (!IndexOptional) 1103 return std::nullopt; 1104 unsigned NewIndex = *IndexOptional; 1105 1106 if (NewIndex >= TheCall->getNumArgs()) 1107 return std::nullopt; 1108 1109 const Expr *ObjArg = TheCall->getArg(NewIndex); 1110 uint64_t Result; 1111 if (!ObjArg->tryEvaluateObjectSize(Result, getASTContext(), BOSType)) 1112 return std::nullopt; 1113 1114 // Get the object size in the target's size_t width. 1115 return llvm::APSInt::getUnsigned(Result).extOrTrunc(SizeTypeWidth); 1116 }; 1117 1118 auto ComputeStrLenArgument = 1119 [&](unsigned Index) -> std::optional<llvm::APSInt> { 1120 std::optional<unsigned> IndexOptional = TranslateIndex(Index); 1121 if (!IndexOptional) 1122 return std::nullopt; 1123 unsigned NewIndex = *IndexOptional; 1124 1125 const Expr *ObjArg = TheCall->getArg(NewIndex); 1126 uint64_t Result; 1127 if (!ObjArg->tryEvaluateStrLen(Result, getASTContext())) 1128 return std::nullopt; 1129 // Add 1 for null byte. 1130 return llvm::APSInt::getUnsigned(Result + 1).extOrTrunc(SizeTypeWidth); 1131 }; 1132 1133 std::optional<llvm::APSInt> SourceSize; 1134 std::optional<llvm::APSInt> DestinationSize; 1135 unsigned DiagID = 0; 1136 bool IsChkVariant = false; 1137 1138 auto GetFunctionName = [&]() { 1139 StringRef FunctionName = getASTContext().BuiltinInfo.getName(BuiltinID); 1140 // Skim off the details of whichever builtin was called to produce a better 1141 // diagnostic, as it's unlikely that the user wrote the __builtin 1142 // explicitly. 
1143 if (IsChkVariant) { 1144 FunctionName = FunctionName.drop_front(std::strlen("__builtin___")); 1145 FunctionName = FunctionName.drop_back(std::strlen("_chk")); 1146 } else if (FunctionName.startswith("__builtin_")) { 1147 FunctionName = FunctionName.drop_front(std::strlen("__builtin_")); 1148 } 1149 return FunctionName; 1150 }; 1151 1152 switch (BuiltinID) { 1153 default: 1154 return; 1155 case Builtin::BI__builtin_strcpy: 1156 case Builtin::BIstrcpy: { 1157 DiagID = diag::warn_fortify_strlen_overflow; 1158 SourceSize = ComputeStrLenArgument(1); 1159 DestinationSize = ComputeSizeArgument(0); 1160 break; 1161 } 1162 1163 case Builtin::BI__builtin___strcpy_chk: { 1164 DiagID = diag::warn_fortify_strlen_overflow; 1165 SourceSize = ComputeStrLenArgument(1); 1166 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1167 IsChkVariant = true; 1168 break; 1169 } 1170 1171 case Builtin::BIscanf: 1172 case Builtin::BIfscanf: 1173 case Builtin::BIsscanf: { 1174 unsigned FormatIndex = 1; 1175 unsigned DataIndex = 2; 1176 if (BuiltinID == Builtin::BIscanf) { 1177 FormatIndex = 0; 1178 DataIndex = 1; 1179 } 1180 1181 const auto *FormatExpr = 1182 TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1183 1184 const auto *Format = dyn_cast<StringLiteral>(FormatExpr); 1185 if (!Format) 1186 return; 1187 1188 if (!Format->isOrdinary() && !Format->isUTF8()) 1189 return; 1190 1191 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize, 1192 unsigned SourceSize) { 1193 DiagID = diag::warn_fortify_scanf_overflow; 1194 unsigned Index = ArgIndex + DataIndex; 1195 StringRef FunctionName = GetFunctionName(); 1196 DiagRuntimeBehavior(TheCall->getArg(Index)->getBeginLoc(), TheCall, 1197 PDiag(DiagID) << FunctionName << (Index + 1) 1198 << DestSize << SourceSize); 1199 }; 1200 1201 StringRef FormatStrRef = Format->getString(); 1202 auto ShiftedComputeSizeArgument = [&](unsigned Index) { 1203 return ComputeSizeArgument(Index + DataIndex); 1204 }; 1205 ScanfDiagnosticFormatHandler 
H(ShiftedComputeSizeArgument, Diagnose); 1206 const char *FormatBytes = FormatStrRef.data(); 1207 const ConstantArrayType *T = 1208 Context.getAsConstantArrayType(Format->getType()); 1209 assert(T && "String literal not of constant array type!"); 1210 size_t TypeSize = T->getSize().getZExtValue(); 1211 1212 // In case there's a null byte somewhere. 1213 size_t StrLen = 1214 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1215 1216 analyze_format_string::ParseScanfString(H, FormatBytes, 1217 FormatBytes + StrLen, getLangOpts(), 1218 Context.getTargetInfo()); 1219 1220 // Unlike the other cases, in this one we have already issued the diagnostic 1221 // here, so no need to continue (because unlike the other cases, here the 1222 // diagnostic refers to the argument number). 1223 return; 1224 } 1225 1226 case Builtin::BIsprintf: 1227 case Builtin::BI__builtin___sprintf_chk: { 1228 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3; 1229 auto *FormatExpr = TheCall->getArg(FormatIndex)->IgnoreParenImpCasts(); 1230 1231 if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) { 1232 1233 if (!Format->isOrdinary() && !Format->isUTF8()) 1234 return; 1235 1236 StringRef FormatStrRef = Format->getString(); 1237 EstimateSizeFormatHandler H(FormatStrRef); 1238 const char *FormatBytes = FormatStrRef.data(); 1239 const ConstantArrayType *T = 1240 Context.getAsConstantArrayType(Format->getType()); 1241 assert(T && "String literal not of constant array type!"); 1242 size_t TypeSize = T->getSize().getZExtValue(); 1243 1244 // In case there's a null byte somewhere. 
1245 size_t StrLen = 1246 std::min(std::max(TypeSize, size_t(1)) - 1, FormatStrRef.find(0)); 1247 if (!analyze_format_string::ParsePrintfString( 1248 H, FormatBytes, FormatBytes + StrLen, getLangOpts(), 1249 Context.getTargetInfo(), false)) { 1250 DiagID = diag::warn_fortify_source_format_overflow; 1251 SourceSize = llvm::APSInt::getUnsigned(H.getSizeLowerBound()) 1252 .extOrTrunc(SizeTypeWidth); 1253 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) { 1254 DestinationSize = ComputeExplicitObjectSizeArgument(2); 1255 IsChkVariant = true; 1256 } else { 1257 DestinationSize = ComputeSizeArgument(0); 1258 } 1259 break; 1260 } 1261 } 1262 return; 1263 } 1264 case Builtin::BI__builtin___memcpy_chk: 1265 case Builtin::BI__builtin___memmove_chk: 1266 case Builtin::BI__builtin___memset_chk: 1267 case Builtin::BI__builtin___strlcat_chk: 1268 case Builtin::BI__builtin___strlcpy_chk: 1269 case Builtin::BI__builtin___strncat_chk: 1270 case Builtin::BI__builtin___strncpy_chk: 1271 case Builtin::BI__builtin___stpncpy_chk: 1272 case Builtin::BI__builtin___memccpy_chk: 1273 case Builtin::BI__builtin___mempcpy_chk: { 1274 DiagID = diag::warn_builtin_chk_overflow; 1275 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2); 1276 DestinationSize = 1277 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1278 IsChkVariant = true; 1279 break; 1280 } 1281 1282 case Builtin::BI__builtin___snprintf_chk: 1283 case Builtin::BI__builtin___vsnprintf_chk: { 1284 DiagID = diag::warn_builtin_chk_overflow; 1285 SourceSize = ComputeExplicitObjectSizeArgument(1); 1286 DestinationSize = ComputeExplicitObjectSizeArgument(3); 1287 IsChkVariant = true; 1288 break; 1289 } 1290 1291 case Builtin::BIstrncat: 1292 case Builtin::BI__builtin_strncat: 1293 case Builtin::BIstrncpy: 1294 case Builtin::BI__builtin_strncpy: 1295 case Builtin::BIstpncpy: 1296 case Builtin::BI__builtin_stpncpy: { 1297 // Whether these functions overflow depends on the runtime strlen of the 1298 // 
string, not just the buffer size, so emitting the "always overflow" 1299 // diagnostic isn't quite right. We should still diagnose passing a buffer 1300 // size larger than the destination buffer though; this is a runtime abort 1301 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise. 1302 DiagID = diag::warn_fortify_source_size_mismatch; 1303 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1304 DestinationSize = ComputeSizeArgument(0); 1305 break; 1306 } 1307 1308 case Builtin::BImemcpy: 1309 case Builtin::BI__builtin_memcpy: 1310 case Builtin::BImemmove: 1311 case Builtin::BI__builtin_memmove: 1312 case Builtin::BImemset: 1313 case Builtin::BI__builtin_memset: 1314 case Builtin::BImempcpy: 1315 case Builtin::BI__builtin_mempcpy: { 1316 DiagID = diag::warn_fortify_source_overflow; 1317 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1); 1318 DestinationSize = ComputeSizeArgument(0); 1319 break; 1320 } 1321 case Builtin::BIsnprintf: 1322 case Builtin::BI__builtin_snprintf: 1323 case Builtin::BIvsnprintf: 1324 case Builtin::BI__builtin_vsnprintf: { 1325 DiagID = diag::warn_fortify_source_size_mismatch; 1326 SourceSize = ComputeExplicitObjectSizeArgument(1); 1327 DestinationSize = ComputeSizeArgument(0); 1328 break; 1329 } 1330 } 1331 1332 if (!SourceSize || !DestinationSize || 1333 llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0) 1334 return; 1335 1336 StringRef FunctionName = GetFunctionName(); 1337 1338 SmallString<16> DestinationStr; 1339 SmallString<16> SourceStr; 1340 DestinationSize->toString(DestinationStr, /*Radix=*/10); 1341 SourceSize->toString(SourceStr, /*Radix=*/10); 1342 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall, 1343 PDiag(DiagID) 1344 << FunctionName << DestinationStr << SourceStr); 1345} 1346 1347static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall, 1348 Scope::ScopeFlags NeededScopeFlags, 1349 unsigned DiagID) { 1350 // Scopes aren't available 
during instantiation. Fortunately, builtin 1351 // functions cannot be template args so they cannot be formed through template 1352 // instantiation. Therefore checking once during the parse is sufficient. 1353 if (SemaRef.inTemplateInstantiation()) 1354 return false; 1355 1356 Scope *S = SemaRef.getCurScope(); 1357 while (S && !S->isSEHExceptScope()) 1358 S = S->getParent(); 1359 if (!S || !(S->getFlags() & NeededScopeFlags)) { 1360 auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 1361 SemaRef.Diag(TheCall->getExprLoc(), DiagID) 1362 << DRE->getDecl()->getIdentifier(); 1363 return true; 1364 } 1365 1366 return false; 1367} 1368 1369static inline bool isBlockPointer(Expr *Arg) { 1370 return Arg->getType()->isBlockPointerType(); 1371} 1372 1373/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local 1374/// void*, which is a requirement of device side enqueue. 1375static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) { 1376 const BlockPointerType *BPT = 1377 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1378 ArrayRef<QualType> Params = 1379 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes(); 1380 unsigned ArgCounter = 0; 1381 bool IllegalParams = false; 1382 // Iterate through the block parameters until either one is found that is not 1383 // a local void*, or the block is valid. 1384 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end(); 1385 I != E; ++I, ++ArgCounter) { 1386 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() || 1387 (*I)->getPointeeType().getQualifiers().getAddressSpace() != 1388 LangAS::opencl_local) { 1389 // Get the location of the error. If a block literal has been passed 1390 // (BlockExpr) then we can point straight to the offending argument, 1391 // else we just point to the variable reference. 
1392 SourceLocation ErrorLoc; 1393 if (isa<BlockExpr>(BlockArg)) { 1394 BlockDecl *BD = cast<BlockExpr>(BlockArg)->getBlockDecl(); 1395 ErrorLoc = BD->getParamDecl(ArgCounter)->getBeginLoc(); 1396 } else if (isa<DeclRefExpr>(BlockArg)) { 1397 ErrorLoc = cast<DeclRefExpr>(BlockArg)->getBeginLoc(); 1398 } 1399 S.Diag(ErrorLoc, 1400 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args); 1401 IllegalParams = true; 1402 } 1403 } 1404 1405 return IllegalParams; 1406} 1407 1408static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) { 1409 // OpenCL device can support extension but not the feature as extension 1410 // requires subgroup independent forward progress, but subgroup independent 1411 // forward progress is optional in OpenCL C 3.0 __opencl_c_subgroups feature. 1412 if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts()) && 1413 !S.getOpenCLOptions().isSupported("__opencl_c_subgroups", 1414 S.getLangOpts())) { 1415 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension) 1416 << 1 << Call->getDirectCallee() 1417 << "cl_khr_subgroups or __opencl_c_subgroups"; 1418 return true; 1419 } 1420 return false; 1421} 1422 1423static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) { 1424 if (checkArgCount(S, TheCall, 2)) 1425 return true; 1426 1427 if (checkOpenCLSubgroupExt(S, TheCall)) 1428 return true; 1429 1430 // First argument is an ndrange_t type. 
1431 Expr *NDRangeArg = TheCall->getArg(0); 1432 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") { 1433 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1434 << TheCall->getDirectCallee() << "'ndrange_t'"; 1435 return true; 1436 } 1437 1438 Expr *BlockArg = TheCall->getArg(1); 1439 if (!isBlockPointer(BlockArg)) { 1440 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1441 << TheCall->getDirectCallee() << "block"; 1442 return true; 1443 } 1444 return checkOpenCLBlockArgs(S, BlockArg); 1445} 1446 1447/// OpenCL C v2.0, s6.13.17.6 - Check the argument to the 1448/// get_kernel_work_group_size 1449/// and get_kernel_preferred_work_group_size_multiple builtin functions. 1450static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) { 1451 if (checkArgCount(S, TheCall, 1)) 1452 return true; 1453 1454 Expr *BlockArg = TheCall->getArg(0); 1455 if (!isBlockPointer(BlockArg)) { 1456 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type) 1457 << TheCall->getDirectCallee() << "block"; 1458 return true; 1459 } 1460 return checkOpenCLBlockArgs(S, BlockArg); 1461} 1462 1463/// Diagnose integer type and any valid implicit conversion to it. 1464static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, 1465 const QualType &IntType); 1466 1467static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall, 1468 unsigned Start, unsigned End) { 1469 bool IllegalParams = false; 1470 for (unsigned I = Start; I <= End; ++I) 1471 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), 1472 S.Context.getSizeType()); 1473 return IllegalParams; 1474} 1475 1476/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all 1477/// 'local void*' parameter of passed block. 
1478static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall, 1479 Expr *BlockArg, 1480 unsigned NumNonVarArgs) { 1481 const BlockPointerType *BPT = 1482 cast<BlockPointerType>(BlockArg->getType().getCanonicalType()); 1483 unsigned NumBlockParams = 1484 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams(); 1485 unsigned TotalNumArgs = TheCall->getNumArgs(); 1486 1487 // For each argument passed to the block, a corresponding uint needs to 1488 // be passed to describe the size of the local memory. 1489 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) { 1490 S.Diag(TheCall->getBeginLoc(), 1491 diag::err_opencl_enqueue_kernel_local_size_args); 1492 return true; 1493 } 1494 1495 // Check that the sizes of the local memory are specified by integers. 1496 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, NumNonVarArgs, 1497 TotalNumArgs - 1); 1498} 1499 1500/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different 1501/// overload formats specified in Table 6.13.17.1. 1502/// int enqueue_kernel(queue_t queue, 1503/// kernel_enqueue_flags_t flags, 1504/// const ndrange_t ndrange, 1505/// void (^block)(void)) 1506/// int enqueue_kernel(queue_t queue, 1507/// kernel_enqueue_flags_t flags, 1508/// const ndrange_t ndrange, 1509/// uint num_events_in_wait_list, 1510/// clk_event_t *event_wait_list, 1511/// clk_event_t *event_ret, 1512/// void (^block)(void)) 1513/// int enqueue_kernel(queue_t queue, 1514/// kernel_enqueue_flags_t flags, 1515/// const ndrange_t ndrange, 1516/// void (^block)(local void*, ...), 1517/// uint size0, ...) 1518/// int enqueue_kernel(queue_t queue, 1519/// kernel_enqueue_flags_t flags, 1520/// const ndrange_t ndrange, 1521/// uint num_events_in_wait_list, 1522/// clk_event_t *event_wait_list, 1523/// clk_event_t *event_ret, 1524/// void (^block)(local void*, ...), 1525/// uint size0, ...) 
static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
  unsigned NumArgs = TheCall->getNumArgs();

  // All four overloads take at least (queue, flags, ndrange, ...).
  if (NumArgs < 4) {
    S.Diag(TheCall->getBeginLoc(),
           diag::err_typecheck_call_too_few_args_at_least)
        << 0 << 4 << NumArgs;
    return true;
  }

  Expr *Arg0 = TheCall->getArg(0);
  Expr *Arg1 = TheCall->getArg(1);
  Expr *Arg2 = TheCall->getArg(2);
  Expr *Arg3 = TheCall->getArg(3);

  // First argument always needs to be a queue_t type.
  if (!Arg0->getType()->isQueueT()) {
    S.Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
    return true;
  }

  // Second argument always needs to be a kernel_enqueue_flags_t enum value.
  if (!Arg1->getType()->isIntegerType()) {
    S.Diag(TheCall->getArg(1)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
    return true;
  }

  // Third argument is always an ndrange_t type.
  if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
    S.Diag(TheCall->getArg(2)->getBeginLoc(),
           diag::err_opencl_builtin_expected_type)
        << TheCall->getDirectCallee() << "'ndrange_t'";
    return true;
  }

  // With four arguments, there is only one form that the function could be
  // called in: no events and no variable arguments.
  if (NumArgs == 4) {
    // check that the last argument is the right block type.
    if (!isBlockPointer(Arg3)) {
      S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    // we have a block type, check the prototype
    const BlockPointerType *BPT =
        cast<BlockPointerType>(Arg3->getType().getCanonicalType());
    if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
      S.Diag(Arg3->getBeginLoc(),
             diag::err_opencl_enqueue_kernel_blocks_no_args);
      return true;
    }
    return false;
  }
  // we can have block + varargs.
  if (isBlockPointer(Arg3))
    return (checkOpenCLBlockArgs(S, Arg3) ||
            checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg3, 4));
  // last two cases with either exactly 7 args or 7 args and varargs.
  if (NumArgs >= 7) {
    // check common block argument.
    Expr *Arg6 = TheCall->getArg(6);
    if (!isBlockPointer(Arg6)) {
      S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "block";
      return true;
    }
    if (checkOpenCLBlockArgs(S, Arg6))
      return true;

    // Fourth argument has to be any integer type.
    if (!Arg3->getType()->isIntegerType()) {
      S.Diag(TheCall->getArg(3)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee() << "integer";
      return true;
    }
    // check remaining common arguments.
    Expr *Arg4 = TheCall->getArg(4);
    Expr *Arg5 = TheCall->getArg(5);

    // Fifth argument is always passed as a pointer to clk_event_t.
    // NOTE(review): this check accepts arrays of clk_event_t via
    // getPointeeOrArrayElementType(), whereas the sixth-argument check below
    // requires a real pointer type — confirm the asymmetry is intended.
    if (!Arg4->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
      S.Diag(TheCall->getArg(4)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    // Sixth argument is always passed as a pointer to clk_event_t.
    if (!Arg5->isNullPointerConstant(S.Context,
                                     Expr::NPC_ValueDependentIsNotNull) &&
        !(Arg5->getType()->isPointerType() &&
          Arg5->getType()->getPointeeType()->isClkEventT())) {
      S.Diag(TheCall->getArg(5)->getBeginLoc(),
             diag::err_opencl_builtin_expected_type)
          << TheCall->getDirectCallee()
          << S.Context.getPointerType(S.Context.OCLClkEventTy);
      return true;
    }

    if (NumArgs == 7)
      return false;

    return checkOpenCLEnqueueVariadicArgs(S, TheCall, Arg6, 7);
  }

  // None of the specific case has been detected, give generic error
  S.Diag(TheCall->getBeginLoc(),
         diag::err_opencl_enqueue_kernel_incorrect_args);
  return true;
}

/// Returns OpenCL access qual.
static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
  return D->getAttr<OpenCLAccessAttr>();
}

/// Returns true if the first argument is not a pipe, or its access qualifier
/// (read_only/write_only) is incompatible with the pipe builtin being called.
static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
  const Expr *Arg0 = Call->getArg(0);
  // First argument type should always be pipe.
  if (!Arg0->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Arg0->getSourceRange();
    return true;
  }
  // NOTE(review): assumes the pipe argument is a DeclRefExpr — confirm all
  // callers guarantee this before the unchecked cast.
  OpenCLAccessAttr *AccessQual =
      getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
  // Validates the access qualifier is compatible with the call.
  // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
  // read_only and write_only, and assumed to be read_only if no qualifier is
  // specified.
  switch (Call->getDirectCallee()->getBuiltinID()) {
  case Builtin::BIread_pipe:
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
    if (!(!AccessQual || AccessQual->isReadOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "read_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  case Builtin::BIwrite_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (!(AccessQual && AccessQual->isWriteOnly())) {
      S.Diag(Arg0->getBeginLoc(),
             diag::err_opencl_builtin_pipe_invalid_access_modifier)
          << "write_only" << Arg0->getSourceRange();
      return true;
    }
    break;
  default:
    break;
  }
  return false;
}

/// Returns true if pipe element type is different from the pointer.
static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
  const Expr *Arg0 = Call->getArg(0);
  const Expr *ArgIdx = Call->getArg(Idx);
  const PipeType *PipeTy = cast<PipeType>(Arg0->getType());
  const QualType EltTy = PipeTy->getElementType();
  const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
  // The Idx argument should be a pointer and the type of the pointer and
  // the type of pipe element should also be the same.
  if (!ArgTy ||
      !S.Context.hasSameType(
          EltTy, ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
        << ArgIdx->getType() << ArgIdx->getSourceRange();
    return true;
  }
  return false;
}

// Performs semantic analysis for the read/write_pipe call.
// \param S Reference to the semantic analyzer.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
  // OpenCL v2.0 s6.13.16.2 - The built-in read/write
  // functions have two forms.
  switch (Call->getNumArgs()) {
  case 2:
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 2 arguments should be
    // read/write_pipe(pipe T, T*).
    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 1))
      return true;
    break;

  case 4: {
    if (checkOpenCLPipeArg(S, Call))
      return true;
    // The call with 4 arguments should be
    // read/write_pipe(pipe T, reserve_id_t, uint, T*).
    // Check reserve_id_t.
    if (!Call->getArg(1)->getType()->isReserveIDT()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.OCLReserveIDTy
          << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
      return true;
    }

    // Check the index.
    // NOTE(review): isUnsignedIntegerType() is subsumed by isIntegerType(),
    // so the second condition is redundant.
    const Expr *Arg2 = Call->getArg(2);
    if (!Arg2->getType()->isIntegerType() &&
        !Arg2->getType()->isUnsignedIntegerType()) {
      S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
          << Call->getDirectCallee() << S.Context.UnsignedIntTy
          << Arg2->getType() << Arg2->getSourceRange();
      return true;
    }

    // Check packet type T.
    if (checkOpenCLPipePacketType(S, Call, 3))
      return true;
  } break;
  default:
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
        << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the {work_group_/sub_group_
// /_}reserve_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check the reserve size.
  // NOTE(review): isUnsignedIntegerType() is subsumed by isIntegerType(),
  // so the second condition is redundant (same pattern as SemaBuiltinRWPipe).
  if (!Call->getArg(1)->getType()->isIntegerType() &&
      !Call->getArg(1)->getType()->isUnsignedIntegerType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.UnsignedIntTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  // Since return type of reserve_read/write_pipe built-in function is
  // reserve_id_t, which is not defined in the builtin def file , we used int
  // as return type and need to override the return type of these functions.
  Call->setType(S.Context.OCLReserveIDTy);

  return false;
}

// Performs a semantic analysis on {work_group_/sub_group_
// /_}commit_{read/write}_pipe
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 2))
    return true;

  if (checkOpenCLPipeArg(S, Call))
    return true;

  // Check reserve_id_t.
  if (!Call->getArg(1)->getType()->isReserveIDT()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
        << Call->getDirectCallee() << S.Context.OCLReserveIDTy
        << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
    return true;
  }

  return false;
}

// Performs a semantic analysis on the call to built-in Pipe
// Query Functions.
// \param S Reference to the semantic analyzer.
// \param Call The call to the builtin function to be analyzed.
// \return True if a semantic error was found, false otherwise.
static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  if (!Call->getArg(0)->getType()->isPipeType()) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
        << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
    return true;
  }

  return false;
}

// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
// Performs semantic analysis for the to_global/local/private call.
// \param S Reference to the semantic analyzer.
// \param BuiltinID ID of the builtin function.
// \param Call A pointer to the builtin call.
// \return True if a semantic error has been found, false otherwise.
static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
                                    CallExpr *Call) {
  if (checkArgCount(S, Call, 1))
    return true;

  auto RT = Call->getArg(0)->getType();
  // The argument must be a pointer, and a pointer into the constant address
  // space is rejected outright (it cannot be cast to any of the target
  // address spaces these builtins produce).
  if (!RT->isPointerType() || RT->getPointeeType()
      .getAddressSpace() == LangAS::opencl_constant) {
    S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
        << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
    return true;
  }

  // Non-generic pointers are accepted but diagnosed with a warning, since
  // these builtins are meant to take a generic-address-space pointer.
  if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
    S.Diag(Call->getArg(0)->getBeginLoc(),
           diag::warn_opencl_generic_address_space_arg)
        << Call->getDirectCallee()->getNameInfo().getAsString()
        << Call->getArg(0)->getSourceRange();
  }

  // Rewrite the call's result type: same pointee as the argument, but with
  // the address space implied by the specific builtin.
  RT = RT->getPointeeType();
  auto Qual = RT.getQualifiers();
  switch (BuiltinID) {
  case Builtin::BIto_global:
    Qual.setAddressSpace(LangAS::opencl_global);
    break;
  case Builtin::BIto_local:
    Qual.setAddressSpace(LangAS::opencl_local);
    break;
  case Builtin::BIto_private:
    Qual.setAddressSpace(LangAS::opencl_private);
    break;
  default:
    llvm_unreachable("Invalid builtin function");
  }
  Call->setType(S.Context.getPointerType(S.Context.getQualifiedType(
      RT.getUnqualifiedType(), Qual)));

  return false;
}

// Performs semantic analysis for __builtin_launder: validates the argument,
// computes the (decayed) parameter/result type, and rewrites the call.
// \return The checked call, or ExprError() if a semantic error was found.
static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return ExprError();

  // Compute __builtin_launder's parameter type from the argument.
  // The parameter type is:
  //  * The type of the argument if it's not an array or function type,
  //  Otherwise,
  //  * The decayed argument type.
  QualType ParamTy = [&]() {
    QualType ArgTy = TheCall->getArg(0)->getType();
    if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
      return S.Context.getPointerType(Ty->getElementType());
    if (ArgTy->isFunctionType()) {
      return S.Context.getPointerType(ArgTy);
    }
    return ArgTy;
  }();

  TheCall->setType(ParamTy);

  // Select the diagnostic for an invalid parameter type:
  //   0 - not a pointer at all, 1 - function pointer, 2 - void pointer.
  // An unengaged optional means the type is acceptable.
  auto DiagSelect = [&]() -> std::optional<unsigned> {
    if (!ParamTy->isPointerType())
      return 0;
    if (ParamTy->isFunctionPointerType())
      return 1;
    if (ParamTy->isVoidPointerType())
      return 2;
    return std::optional<unsigned>{};
  }();
  if (DiagSelect) {
    S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
        << *DiagSelect << TheCall->getSourceRange();
    return ExprError();
  }

  // We either have an incomplete class type, or we have a class template
  // whose instantiation has not been forced. Example:
  //
  //   template <class T> struct Foo { T value; };
  //   Foo<int> *p = nullptr;
  //   auto *d = __builtin_launder(p);
  if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
                            diag::err_incomplete_type))
    return ExprError();

  assert(ParamTy->getPointeeType()->isObjectType() &&
         "Unhandled non-object pointer case");

  // Convert the argument to the computed parameter type, as for an ordinary
  // by-value function parameter.
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, ParamTy, false);
  ExprResult Arg =
      S.PerformCopyInitialization(Entity, SourceLocation(), TheCall->getArg(0));
  if (Arg.isInvalid())
    return ExprError();
  TheCall->setArg(0, Arg.get());

  return TheCall;
}

// Emit an error and return true if the current object format type is in the
// list of unsupported types.
1954static bool CheckBuiltinTargetNotInUnsupported( 1955 Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1956 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) { 1957 llvm::Triple::ObjectFormatType CurObjFormat = 1958 S.getASTContext().getTargetInfo().getTriple().getObjectFormat(); 1959 if (llvm::is_contained(UnsupportedObjectFormatTypes, CurObjFormat)) { 1960 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1961 << TheCall->getSourceRange(); 1962 return true; 1963 } 1964 return false; 1965} 1966 1967// Emit an error and return true if the current architecture is not in the list 1968// of supported architectures. 1969static bool 1970CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall, 1971 ArrayRef<llvm::Triple::ArchType> SupportedArchs) { 1972 llvm::Triple::ArchType CurArch = 1973 S.getASTContext().getTargetInfo().getTriple().getArch(); 1974 if (llvm::is_contained(SupportedArchs, CurArch)) 1975 return false; 1976 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 1977 << TheCall->getSourceRange(); 1978 return true; 1979} 1980 1981static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, 1982 SourceLocation CallSiteLoc); 1983 1984bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, 1985 CallExpr *TheCall) { 1986 switch (TI.getTriple().getArch()) { 1987 default: 1988 // Some builtins don't require additional checking, so just consider these 1989 // acceptable. 
1990 return false; 1991 case llvm::Triple::arm: 1992 case llvm::Triple::armeb: 1993 case llvm::Triple::thumb: 1994 case llvm::Triple::thumbeb: 1995 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall); 1996 case llvm::Triple::aarch64: 1997 case llvm::Triple::aarch64_32: 1998 case llvm::Triple::aarch64_be: 1999 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall); 2000 case llvm::Triple::bpfeb: 2001 case llvm::Triple::bpfel: 2002 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall); 2003 case llvm::Triple::hexagon: 2004 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall); 2005 case llvm::Triple::mips: 2006 case llvm::Triple::mipsel: 2007 case llvm::Triple::mips64: 2008 case llvm::Triple::mips64el: 2009 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall); 2010 case llvm::Triple::systemz: 2011 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall); 2012 case llvm::Triple::x86: 2013 case llvm::Triple::x86_64: 2014 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall); 2015 case llvm::Triple::ppc: 2016 case llvm::Triple::ppcle: 2017 case llvm::Triple::ppc64: 2018 case llvm::Triple::ppc64le: 2019 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall); 2020 case llvm::Triple::amdgcn: 2021 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall); 2022 case llvm::Triple::riscv32: 2023 case llvm::Triple::riscv64: 2024 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall); 2025 case llvm::Triple::loongarch32: 2026 case llvm::Triple::loongarch64: 2027 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall); 2028 } 2029} 2030 2031// Check if \p Ty is a valid type for the elementwise math builtins. If it is 2032// not a valid type, emit an error message and return true. Otherwise return 2033// false. 
static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                        QualType Ty) {
  // A valid operand is either a vector or a type that is a valid
  // constant-matrix element type.
  if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(Ty)) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << 1 << /* vector, integer or float ty*/ 0 << Ty;
  }

  return false;
}

// Check that the element type of \p ArgTy (looking through a vector type, if
// present) is a real floating-point type; otherwise diagnose argument
// \p ArgIndex with err_builtin_invalid_arg_type and return true.
static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
                                          QualType ArgTy, int ArgIndex) {
  QualType EltTy = ArgTy;
  if (auto *VecTy = EltTy->getAs<VectorType>())
    EltTy = VecTy->getElementType();

  if (!EltTy->isRealFloatingType()) {
    return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
           << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
  }

  return false;
}

/// Perform semantic checking of a call to a builtin function. Many builtins
/// need custom validation (argument counts, constant arguments, target
/// restrictions) and some rewrite the call's type or arguments in place.
/// Returns the (possibly rewritten) call, or ExprError() on a diagnosed
/// semantic error.
ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  // ICEArguments is a bitmask; bit N set means argument N must be an ICE.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    // If we don't have enough arguments, continue so we can issue better
    // diagnostic in checkArgCount(...)
    if (ArgNo < TheCall->getNumArgs() &&
        SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
    // on z/OS) and for XCOFF (i.e., on AIX). Emit unsupported
    if (CheckBuiltinTargetNotInUnsupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
      return ExprError();
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(BuiltinID, TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    // The MS __va_start has a different form on 32-bit ARM targets.
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::aarch64:
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARMMicrosoft(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(BuiltinID, TheCall))
        return ExprError();
      break;
    }
    break;
  }

  // The acquire, release, and no fence variants are ARM and AArch64 only.
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    if (CheckBuiltinTargetInSupported(
            *this, BuiltinID, TheCall,
            {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
      return ExprError();
    break;

  // The 64-bit bittest variants are x64, ARM, and AArch64 only.
  case Builtin::BI_bittest64:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
    if (CheckBuiltinTargetInSupported(*this, BuiltinID, TheCall,
                                      {llvm::Triple::x86_64, llvm::Triple::arm,
                                       llvm::Triple::thumb,
                                       llvm::Triple::aarch64}))
      return ExprError();
    break;

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    // 5 constant classification arguments + the value to classify.
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_alloca_with_align:
  case Builtin::BI__builtin_alloca_with_align_uninitialized:
    if (SemaBuiltinAllocaWithAlign(TheCall))
      return ExprError();
    [[fallthrough]];
  case Builtin::BI__builtin_alloca:
  case Builtin::BI__builtin_alloca_uninitialized:
    // All alloca variants (including the _with_align ones that fall through)
    // get a -Walloca-style warning.
    Diag(TheCall->getBeginLoc(), diag::warn_alloca)
        << TheCall->getDirectCallee();
    break;
  case Builtin::BI__arithmetic_fence:
    if (SemaBuiltinArithmeticFence(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size:
    // The second argument ("type") must be a constant in [0, 3].
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_complex:
    if (SemaBuiltinComplex(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_constant_p: {
    if (checkArgCount(*this, TheCall, 1)) return true;
    ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
    if (Arg.isInvalid()) return true;
    TheCall->setArg(0, Arg.get());
    TheCall->setType(Context.IntTy);
    break;
  }
  case Builtin::BI__builtin_launder:
    return SemaBuiltinLaunder(*this, TheCall);
  // All of the __sync_* atomics are overloaded on the operand type and are
  // handled uniformly by SemaBuiltinAtomicOverloaded below.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap:
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
  case Builtin::BI__sync_synchronize:
    Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
        << TheCall->getCallee()->getSourceRange();
    break;
  case Builtin::BI__builtin_nontemporal_load:
  case Builtin::BI__builtin_nontemporal_store:
    return SemaBuiltinNontemporalOverloaded(TheCallResult);
  case Builtin::BI__builtin_memcpy_inline: {
    clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about copying to or from `nullptr` pointers when `size` is
    // greater than 0. When `size` is value dependent we cannot evaluate its
    // value so we bail out.
    if (SizeOp->isValueDependent())
      break;
    if (!SizeOp->EvaluateKnownConstInt(Context).isZero()) {
      CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
      CheckNonNullArgument(*this, TheCall->getArg(1), TheCall->getExprLoc());
    }
    break;
  }
  case Builtin::BI__builtin_memset_inline: {
    clang::Expr *SizeOp = TheCall->getArg(2);
    // We warn about filling to `nullptr` pointers when `size` is greater than
    // 0. When `size` is value dependent we cannot evaluate its value so we bail
    // out.
    if (SizeOp->isValueDependent())
      break;
    if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
      CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
    break;
  }
// Expand a case label for every __c11/__opencl/__atomic builtin listed as
// ATOMIC_BUILTIN in Builtins.def; all are checked by SemaAtomicOpsOverloaded.
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__annotation:
    if (SemaBuiltinMSVCAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_function_start:
    if (SemaBuiltinFunctionStart(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_is_aligned:
  case Builtin::BI__builtin_align_up:
  case Builtin::BI__builtin_align_down:
    if (SemaBuiltinAlignment(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow:
    if (SemaBuiltinOverflow(*this, TheCall, BuiltinID))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete: {
    bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
    ExprResult Res =
        SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
    if (Res.isInvalid())
      CorrectDelayedTyposInExpr(TheCallResult.get());
    return Res;
  }
  case Builtin::BI__builtin_dump_struct:
    return SemaBuiltinDumpStruct(*this, TheCall);
  case Builtin::BI__builtin_expect_with_probability: {
    // We first want to ensure we are called with 3 arguments
    if (checkArgCount(*this, TheCall, 3))
      return ExprError();
    // then check probability is constant float in range [0.0, 1.0]
    const Expr *ProbArg = TheCall->getArg(2);
    SmallVector<PartialDiagnosticAt, 8> Notes;
    Expr::EvalResult Eval;
    Eval.Diag = &Notes;
    if ((!ProbArg->EvaluateAsConstantExpr(Eval, Context)) ||
        !Eval.Val.isFloat()) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
          << ProbArg->getSourceRange();
      // Replay any notes produced while evaluating the argument.
      for (const PartialDiagnosticAt &PDiag : Notes)
        Diag(PDiag.first, PDiag.second);
      return ExprError();
    }
    llvm::APFloat Probability = Eval.Val.getFloat();
    bool LoseInfo = false;
    Probability.convert(llvm::APFloat::IEEEdouble(),
                        llvm::RoundingMode::Dynamic, &LoseInfo);
    // Written as a negated conjunction so that NaN fails the range check.
    if (!(Probability >= llvm::APFloat(0.0) &&
          Probability <= llvm::APFloat(1.0))) {
      Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
          << ProbArg->getSourceRange();
      return ExprError();
    }
    break;
  }
  case Builtin::BI__builtin_preserve_access_index:
    if (SemaBuiltinPreserveAI(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getBeginLoc(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  case Builtin::BIaddressof:
  case Builtin::BI__addressof:
  case Builtin::BIforward:
  case Builtin::BImove:
  case Builtin::BImove_if_noexcept:
  case Builtin::BIas_const: {
    // These are all expected to be of the form
    //   T &/&&/* f(U &/&&)
    // where T and U only differ in qualification.
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();
    QualType Param = FDecl->getParamDecl(0)->getType();
    QualType Result = FDecl->getReturnType();
    bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
                          BuiltinID == Builtin::BI__addressof;
    if (!(Param->isReferenceType() &&
          (ReturnsPointer ? Result->isAnyPointerType()
                          : Result->isReferenceType()) &&
          Context.hasSameUnqualifiedType(Param->getPointeeType(),
                                         Result->getPointeeType()))) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
          << FDecl;
      return ExprError();
    }
    break;
  }
  // OpenCL v2.0, s6.13.16 - Pipe functions
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe:
    // Since those two functions are declared with var args, we need a semantic
    // check for the argument.
    if (SemaBuiltinRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
    if (SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe:
    // sub_group variants additionally require the subgroup extension.
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinReserveRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
    if (SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe:
    if (checkOpenCLSubgroupExt(*this, TheCall) ||
        SemaBuiltinCommitRWPipe(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets:
    if (SemaBuiltinPipePackets(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private:
    if (SemaOpenCLBuiltinToAddr(*this, BuiltinID, TheCall))
      return ExprError();
    break;
  // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
  case Builtin::BIenqueue_kernel:
    if (SemaOpenCLBuiltinEnqueueKernel(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_work_group_size:
  case Builtin::BIget_kernel_preferred_work_group_size_multiple:
    if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange:
    if (SemaOpenCLBuiltinNDRangeAndBlock(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_os_log_format:
    Cleanup.setExprNeedsCleanups(true);
    [[fallthrough]];
  case Builtin::BI__builtin_os_log_format_buffer_size:
    if (SemaBuiltinOSLogFormat(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_frame_address:
  case Builtin::BI__builtin_return_address: {
    if (SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xFFFF))
      return ExprError();

    // -Wframe-address warning if non-zero passed to builtin
    // return/frame address.
    Expr::EvalResult Result;
    if (!TheCall->getArg(0)->isValueDependent() &&
        TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
        Result.Val.getInt() != 0)
      Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
          << ((BuiltinID == Builtin::BI__builtin_return_address)
                  ? "__builtin_return_address"
                  : "__builtin_frame_address")
          << TheCall->getSourceRange();
    break;
  }

  // __builtin_elementwise_abs restricts the element type to signed integers or
  // floating point types only.
  case Builtin::BI__builtin_elementwise_abs: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (EltTy->isUnsignedIntegerType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* signed integer or float ty*/ 3 << ArgTy;
      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to floating point
  // types only.
  case Builtin::BI__builtin_elementwise_ceil:
  case Builtin::BI__builtin_elementwise_cos:
  case Builtin::BI__builtin_elementwise_floor:
  case Builtin::BI__builtin_elementwise_roundeven:
  case Builtin::BI__builtin_elementwise_sin:
  case Builtin::BI__builtin_elementwise_trunc:
  case Builtin::BI__builtin_elementwise_canonicalize: {
    if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
      return ExprError();

    QualType ArgTy = TheCall->getArg(0)->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();
    if (!EltTy->isFloatingType()) {
      Diag(TheCall->getArg(0)->getBeginLoc(),
           diag::err_builtin_invalid_arg_type)
          << 1 << /* float ty*/ 5 << ArgTy;

      return ExprError();
    }
    break;
  }

  // These builtins restrict the element type to integer
  // types only.
  case Builtin::BI__builtin_elementwise_add_sat:
  case Builtin::BI__builtin_elementwise_sub_sat: {
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    QualType ArgTy = Arg->getType();
    QualType EltTy = ArgTy;

    if (auto *VecTy = EltTy->getAs<VectorType>())
      EltTy = VecTy->getElementType();

    if (!EltTy->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* integer ty */ 6 << ArgTy;
      return ExprError();
    }
    break;
  }

  case Builtin::BI__builtin_elementwise_min:
  case Builtin::BI__builtin_elementwise_max:
    if (SemaBuiltinElementwiseMath(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_elementwise_copysign: {
    if (checkArgCount(*this, TheCall, 2))
      return ExprError();

    ExprResult Magnitude = UsualUnaryConversions(TheCall->getArg(0));
    ExprResult Sign = UsualUnaryConversions(TheCall->getArg(1));
    if (Magnitude.isInvalid() || Sign.isInvalid())
      return ExprError();

    // Both operands must have (vectors of) real floating-point element type,
    // and the two canonical types must match exactly.
    QualType MagnitudeTy = Magnitude.get()->getType();
    QualType SignTy = Sign.get()->getType();
    if (checkFPMathBuiltinElementType(*this, TheCall->getArg(0)->getBeginLoc(),
                                      MagnitudeTy, 1) ||
        checkFPMathBuiltinElementType(*this, TheCall->getArg(1)->getBeginLoc(),
                                      SignTy, 2)) {
      return ExprError();
    }

    if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
      return Diag(Sign.get()->getBeginLoc(),
                  diag::err_typecheck_call_different_arg_types)
             << MagnitudeTy << SignTy;
    }

    TheCall->setArg(0, Magnitude.get());
    TheCall->setArg(1, Sign.get());
    TheCall->setType(Magnitude.get()->getType());
    break;
  }
  case Builtin::BI__builtin_reduce_max:
  case Builtin::BI__builtin_reduce_min: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector ty*/ 4 << Arg->getType();
      return ExprError();
    }

    // The reduction result is a scalar of the vector's element type.
    TheCall->setType(TyA->getElementType());
    break;
  }

  // These builtins support vectors of integers only.
  // TODO: ADD/MUL should support floating-point types.
  case Builtin::BI__builtin_reduce_add:
  case Builtin::BI__builtin_reduce_mul:
  case Builtin::BI__builtin_reduce_xor:
  case Builtin::BI__builtin_reduce_or:
  case Builtin::BI__builtin_reduce_and: {
    if (PrepareBuiltinReduceMathOneArgCall(TheCall))
      return ExprError();

    const Expr *Arg = TheCall->getArg(0);
    const auto *TyA = Arg->getType()->getAs<VectorType>();
    if (!TyA || !TyA->getElementType()->isIntegerType()) {
      Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << 1 << /* vector of integers */ 6 << Arg->getType();
      return ExprError();
    }
    TheCall->setType(TyA->getElementType());
    break;
  }

  case Builtin::BI__builtin_matrix_transpose:
    return SemaBuiltinMatrixTranspose(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_load:
    return SemaBuiltinMatrixColumnMajorLoad(TheCall, TheCallResult);

  case Builtin::BI__builtin_matrix_column_major_store:
    return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);

  case Builtin::BI__builtin_get_device_side_mangled_name: {
    // The single argument must name a function or variable that is emitted on
    // the device side (CUDA global/device/constant or HIP managed).
    auto Check = [](CallExpr *TheCall) {
      if (TheCall->getNumArgs() != 1)
        return false;
      auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
      if (!DRE)
        return false;
      auto *D = DRE->getDecl();
      if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
        return false;
      return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
             D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
    };
    if (!Check(TheCall)) {
      Diag(TheCall->getBeginLoc(),
           diag::err_hip_invalid_args_builtin_mangled_name);
      return ExprError();
    }
  }
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
  if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
    if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
      assert(Context.getAuxTargetInfo() &&
             "Aux Target Builtin, but not an aux target?");

      if (CheckTSBuiltinFunctionCall(
              *Context.getAuxTargetInfo(),
              Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
        return ExprError();
    } else {
      if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
                                     TheCall))
        return ExprError();
    }
  }

  return TheCallResult;
}

// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ?
127 : (1 << IsQuad) - 1; 2793 case NeonTypeFlags::Float16: 2794 assert(!shift && "cannot shift float types!"); 2795 return (4 << IsQuad) - 1; 2796 case NeonTypeFlags::Float32: 2797 assert(!shift && "cannot shift float types!"); 2798 return (2 << IsQuad) - 1; 2799 case NeonTypeFlags::Float64: 2800 assert(!shift && "cannot shift float types!"); 2801 return (1 << IsQuad) - 1; 2802 case NeonTypeFlags::BFloat16: 2803 assert(!shift && "cannot shift float types!"); 2804 return (4 << IsQuad) - 1; 2805 } 2806 llvm_unreachable("Invalid NeonTypeFlag!"); 2807} 2808 2809/// getNeonEltType - Return the QualType corresponding to the elements of 2810/// the vector type specified by the NeonTypeFlags. This is used to check 2811/// the pointer arguments for Neon load/store intrinsics. 2812static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, 2813 bool IsPolyUnsigned, bool IsInt64Long) { 2814 switch (Flags.getEltType()) { 2815 case NeonTypeFlags::Int8: 2816 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; 2817 case NeonTypeFlags::Int16: 2818 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; 2819 case NeonTypeFlags::Int32: 2820 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; 2821 case NeonTypeFlags::Int64: 2822 if (IsInt64Long) 2823 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; 2824 else 2825 return Flags.isUnsigned() ? Context.UnsignedLongLongTy 2826 : Context.LongLongTy; 2827 case NeonTypeFlags::Poly8: 2828 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; 2829 case NeonTypeFlags::Poly16: 2830 return IsPolyUnsigned ? 
/// Perform SVE-specific checking of a call to a target builtin: range-check
/// every immediate operand the builtin declares. Returns true if any
/// immediate is out of range (after emitting diagnostics).
bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  // Range check SVE intrinsics that take immediate values.
  // Each tuple is (argument index, check kind, element size in bits); the
  // per-builtin tables are generated from the SVE tablegen data.
  SmallVector<std::tuple<int,int,int>, 3> ImmChecks;

  switch (BuiltinID) {
  default:
    // No immediate operands to check.
    return false;
#define GET_SVE_IMMEDIATE_CHECK
#include "clang/Basic/arm_sve_sema_rangechecks.inc"
#undef GET_SVE_IMMEDIATE_CHECK
  }

  // Perform all the immediate checks for this builtin call.
  bool HasError = false;
  for (auto &I : ImmChecks) {
    int ArgNum, CheckTy, ElementSizeInBits;
    std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;

    typedef bool(*OptionSetCheckFnTy)(int64_t Value);

    // Function that checks whether the operand (ArgNum) is an immediate
    // that is one of the predefined values.
    auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
                                   int ErrDiag) -> bool {
      // We can't check the value of a dependent argument.
      Expr *Arg = TheCall->getArg(ArgNum);
      if (Arg->isTypeDependent() || Arg->isValueDependent())
        return false;

      // Check constant-ness first.
      llvm::APSInt Imm;
      if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
        return true;

      if (!CheckImm(Imm.getSExtValue()))
        return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
      return false;
    };

    switch ((SVETypeFlags::ImmCheckType)CheckTy) {
    case SVETypeFlags::ImmCheck0_31:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 31))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_13:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 13))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck1_16:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, 16))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_7:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 7))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckExtract:
      // Valid extract indices are 0 .. (2048 / eltsize) - 1.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (2048 / ElementSizeInBits) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRight:
      // Right shifts accept 1 .. eltsize.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1, ElementSizeInBits))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftRightNarrow:
      // Narrowing right shifts accept 1 .. eltsize/2.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 1,
                                      ElementSizeInBits / 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckShiftLeft:
      // Left shifts accept 0 .. eltsize-1.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      ElementSizeInBits - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndex:
      // Lane index within a 128-bit segment.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (1 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexCompRotate:
      // Complex-rotate variants index pairs of elements.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (2 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckLaneIndexDot:
      // Dot-product variants index groups of four elements.
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0,
                                      (128 / (4 * ElementSizeInBits)) - 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRot90_270:
      if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
                              diag::err_rotation_argument_to_cadd))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheckComplexRotAll90:
      if (CheckImmediateInSet(
              [](int64_t V) {
                return V == 0 || V == 90 || V == 180 || V == 270;
              },
              diag::err_rotation_argument_to_cmla))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_1:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 1))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_2:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 2))
        HasError = true;
      break;
    case SVETypeFlags::ImmCheck0_3:
      if (SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, 3))
        HasError = true;
      break;
    }
  }

  return HasError;
}
/// Perform NEON-specific checking of a call to a target builtin: validate the
/// type-code immediate of overloaded intrinsics, check pointer-argument
/// types for load/store intrinsics, and range-check any instruction
/// immediates. Returns true (after diagnosing) on error.
bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  llvm::APSInt Result;
  uint64_t mask = 0;          // bitmask of type codes the builtin accepts
  unsigned TV = 0;            // the type-code value actually passed
  int PtrArgNum = -1;         // index of a pointer argument to type-check
  bool HasConstPtr = false;   // pointer argument must point to const
  // The generated cases set the variables above for overloaded builtins.
  switch (BuiltinID) {
#define GET_NEON_OVERLOAD_CHECK
#include "clang/Basic/arm_neon.inc"
#include "clang/Basic/arm_fp16.inc"
#undef GET_NEON_OVERLOAD_CHECK
  }

  // For NEON intrinsics which are overloaded on vector element type, validate
  // the immediate which specifies which variant to emit.
  unsigned ImmArg = TheCall->getNumArgs()-1;
  if (mask) {
    if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
      return true;

    // Clamp to 64 so out-of-range codes reliably fail the TV > 63 test.
    TV = Result.getLimitedValue(64);
    if ((TV > 63) || (mask & (1ULL << TV)) == 0)
      return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
             << TheCall->getArg(ImmArg)->getSourceRange();
  }

  if (PtrArgNum >= 0) {
    // Check that pointer arguments have the specified type.
    Expr *Arg = TheCall->getArg(PtrArgNum);
    if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
      Arg = ICE->getSubExpr();
    ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
    QualType RHSTy = RHS.get()->getType();

    llvm::Triple::ArchType Arch = TI.getTriple().getArch();
    bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
                          Arch == llvm::Triple::aarch64_32 ||
                          Arch == llvm::Triple::aarch64_be;
    bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
    QualType EltTy =
        getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
    if (HasConstPtr)
      EltTy = EltTy.withConst();
    QualType LHSTy = Context.getPointerType(EltTy);
    // Diagnose the pointer argument as if it were being assigned to a
    // variable of the expected pointer type.
    AssignConvertType ConvTy;
    ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
    if (RHS.isInvalid())
      return true;
    if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
                                 RHS.get(), AA_Assigning))
      return true;
  }

  // For NEON intrinsics which take an immediate value as part of the
  // instruction, range check them here. The generated cases set the argument
  // index (i) and the inclusive bounds (l, u + l).
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  #define GET_NEON_IMMEDIATE_CHECK
  #include "clang/Basic/arm_neon.inc"
  #include "clang/Basic/arm_fp16.inc"
  #undef GET_NEON_IMMEDIATE_CHECK
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
/// Perform MVE-specific checking of a call to a target builtin. The switch
/// body (immediate checks keyed by builtin ID) is generated from the MVE
/// tablegen data; builtins without an entry need no extra checking.
bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
  switch (BuiltinID) {
  default:
    return false;
  #include "clang/Basic/arm_mve_builtin_sema.inc"
  }
}

/// Perform CDE-specific checking of a call to a target builtin. The generated
/// switch performs the per-builtin immediate checks (recording failure in
/// Err); every CDE builtin also takes a coprocessor number as its first
/// argument, which must name a CDE-reserved coprocessor.
bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  bool Err = false;
  switch (BuiltinID) {
  default:
    return false;
#include "clang/Basic/arm_cde_builtin_sema.inc"
  }

  if (Err)
    return true;

  return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
}

/// Check a coprocessor-number immediate: CDE intrinsics (\p WantCDE true)
/// must target a coprocessor configured for CDE, traditional coprocessor
/// intrinsics must not. Returns true after diagnosing a mismatch.
bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
                                        const Expr *CoprocArg, bool WantCDE) {
  if (isConstantEvaluated())
    return false;

  // We can't check the value of a dependent argument.
  if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
    return false;

  // NOTE: the optional is dereferenced unconditionally — callers are expected
  // to have already established that the argument is an integer constant.
  llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
  int64_t CoprocNo = CoprocNoAP.getExtValue();
  assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");

  // Only coprocessors 0..7 can appear in the CDE mask.
  uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
  bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));

  if (IsCDECoproc != WantCDE)
    return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
           << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();

  return false;
}
/// Type-check a call to one of the ARM/AArch64 exclusive or acquire/release
/// load/store builtins (ldrex/ldaex/strex/stlex): validate the pointer
/// operand, insert the implicit casts the builtin needs, and set the call's
/// result type. \p MaxWidth is the widest pointee, in bits, the target
/// supports (64 for ARM, 128 for AArch64). Returns true on error.
bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                        unsigned MaxWidth) {
  assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
          BuiltinID == ARM::BI__builtin_arm_ldaex ||
          BuiltinID == ARM::BI__builtin_arm_strex ||
          BuiltinID == ARM::BI__builtin_arm_stlex ||
          BuiltinID == AArch64::BI__builtin_arm_ldrex ||
          BuiltinID == AArch64::BI__builtin_arm_ldaex ||
          BuiltinID == AArch64::BI__builtin_arm_strex ||
          BuiltinID == AArch64::BI__builtin_arm_stlex) &&
         "unexpected ARM builtin");
  bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
                 BuiltinID == ARM::BI__builtin_arm_ldaex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
                 BuiltinID == AArch64::BI__builtin_arm_ldaex;

  // Callee reference, used as the diagnostic location below.
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // Ensure that we have the proper number of arguments.
  if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
    return true;

  // Inspect the pointer argument of the atomic builtin. This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // The pointer is argument 0 for loads, argument 1 for stores.
  Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
  ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
  // task is to insert the appropriate casts into the AST. First work out just
  // what the appropriate type is.
  QualType ValType = pointerType->getPointeeType();
  QualType AddrType = ValType.getUnqualifiedType().withVolatile();
  if (IsLdrex)
    AddrType.addConst();

  // Issue a warning if the cast is dodgy.
  CastKind CastNeeded = CK_NoOp;
  if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
    CastNeeded = CK_BitCast;
    Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
        << PointerArg->getType() << Context.getPointerType(AddrType)
        << AA_Passing << PointerArg->getSourceRange();
  }

  // Finally, do the cast and replace the argument with the corrected version.
  AddrType = Context.getPointerType(AddrType);
  PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
  if (PointerArgRes.isInvalid())
    return true;
  PointerArg = PointerArgRes.get();

  TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);

  // In general, we allow ints, floats and pointers to be loaded and stored.
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
    Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // But ARM doesn't have instructions to deal with 128-bit versions.
  if (Context.getTypeSize(ValType) > MaxWidth) {
    assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
    Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
        << PointerArg->getType() << PointerArg->getSourceRange();
    return true;
  }

  // ARC ownership-qualified pointees cannot be handled by these builtins.
  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
        << ValType << PointerArg->getSourceRange();
    return true;
  }

  // Loads return the loaded value.
  if (IsLdrex) {
    TheCall->setType(ValType);
    return false;
  }

  // Initialize the argument to be stored.
  ExprResult ValArg = TheCall->getArg(0);
  InitializedEntity Entity = InitializedEntity::InitializeParameter(
      Context, ValType, /*consume*/ false);
  ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
  if (ValArg.isInvalid())
    return true;
  TheCall->setArg(0, ValArg.get());

  // __builtin_arm_strex always returns an int. It's marked as such in the .def,
  // but the custom checker bypasses all default analysis.
  TheCall->setType(Context.IntTy);
  return false;
}
/// Perform 32-bit-ARM-specific checking of a call to a target builtin.
/// Dispatches to the exclusive-access, special-register, NEON, MVE and CDE
/// checkers, then range-checks the remaining immediate-taking intrinsics.
/// Returns true (after diagnosing) on error.
bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  // 32-bit ARM supports exclusive accesses up to 64 bits wide.
  if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
      BuiltinID == ARM::BI__builtin_arm_ldaex ||
      BuiltinID == ARM::BI__builtin_arm_strex ||
      BuiltinID == ARM::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
  }

  if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
  }

  if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == ARM::BI__builtin_arm_wsr64)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);

  if (BuiltinID == ARM::BI__builtin_arm_rsr ||
      BuiltinID == ARM::BI__builtin_arm_rsrp ||
      BuiltinID == ARM::BI__builtin_arm_wsr ||
      BuiltinID == ARM::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;
  if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;
  if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // FIXME: VFP Intrinsics should error if VFP not present.
  switch (BuiltinID) {
  default: return false;
  case ARM::BI__builtin_arm_ssat:
    // Signed saturate counts bits 1..32; unsigned counts 0..31.
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
  case ARM::BI__builtin_arm_usat:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case ARM::BI__builtin_arm_ssat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
  case ARM::BI__builtin_arm_usat16:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  case ARM::BI__builtin_arm_vcvtr_f:
  case ARM::BI__builtin_arm_vcvtr_d:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
  case ARM::BI__builtin_arm_dmb:
  case ARM::BI__builtin_arm_dsb:
  case ARM::BI__builtin_arm_isb:
  case ARM::BI__builtin_arm_dbg:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
  case ARM::BI__builtin_arm_cdp:
  case ARM::BI__builtin_arm_cdp2:
  case ARM::BI__builtin_arm_mcr:
  case ARM::BI__builtin_arm_mcr2:
  case ARM::BI__builtin_arm_mrc:
  case ARM::BI__builtin_arm_mrc2:
  case ARM::BI__builtin_arm_mcrr:
  case ARM::BI__builtin_arm_mcrr2:
  case ARM::BI__builtin_arm_mrrc:
  case ARM::BI__builtin_arm_mrrc2:
  case ARM::BI__builtin_arm_ldc:
  case ARM::BI__builtin_arm_ldcl:
  case ARM::BI__builtin_arm_ldc2:
  case ARM::BI__builtin_arm_ldc2l:
  case ARM::BI__builtin_arm_stc:
  case ARM::BI__builtin_arm_stcl:
  case ARM::BI__builtin_arm_stc2:
  case ARM::BI__builtin_arm_stc2l:
    // Coprocessor intrinsics: argument 0 is the coprocessor number, which
    // must be in range and must not name a CDE-reserved coprocessor.
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
           CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
                                        /*WantCDE*/ false);
  }
}
/// Perform AArch64-specific checking of a call to a target builtin.
/// Dispatches to the exclusive-access, special-register, MTE, NEON and SVE
/// checkers, then range-checks the remaining immediate-taking intrinsics.
/// Returns true (after diagnosing) on error.
bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
                                           unsigned BuiltinID,
                                           CallExpr *TheCall) {
  // AArch64 supports exclusive accesses up to 128 bits wide.
  if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
      BuiltinID == AArch64::BI__builtin_arm_ldaex ||
      BuiltinID == AArch64::BI__builtin_arm_strex ||
      BuiltinID == AArch64::BI__builtin_arm_stlex) {
    return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == AArch64::BI__builtin_arm_wsr128)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Memory Tagging Extensions (MTE) Intrinsics
  if (BuiltinID == AArch64::BI__builtin_arm_irg ||
      BuiltinID == AArch64::BI__builtin_arm_addg ||
      BuiltinID == AArch64::BI__builtin_arm_gmi ||
      BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg ||
      BuiltinID == AArch64::BI__builtin_arm_subp) {
    return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
  }

  if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
      BuiltinID == AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == AArch64::BI__builtin_arm_wsr ||
      BuiltinID == AArch64::BI__builtin_arm_wsrp)
    return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);

  // Only check the valid encoding range. Any constant in this range would be
  // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
  // an exception for incorrect registers. This matches MSVC behavior.
  if (BuiltinID == AArch64::BI_ReadStatusReg ||
      BuiltinID == AArch64::BI_WriteStatusReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);

  if (BuiltinID == AArch64::BI__getReg)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);

  if (BuiltinID == AArch64::BI__break)
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);

  if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
    return true;

  if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here. i is the argument index, [l, u + l] the range.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case AArch64::BI__builtin_arm_dmb:
  case AArch64::BI__builtin_arm_dsb:
  case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
  case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
  }

  return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
}
3319 if (BuiltinID == AArch64::BI_ReadStatusReg || 3320 BuiltinID == AArch64::BI_WriteStatusReg) 3321 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff); 3322 3323 if (BuiltinID == AArch64::BI__getReg) 3324 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 3325 3326 if (BuiltinID == AArch64::BI__break) 3327 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff); 3328 3329 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall)) 3330 return true; 3331 3332 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall)) 3333 return true; 3334 3335 // For intrinsics which take an immediate value as part of the instruction, 3336 // range check them here. 3337 unsigned i = 0, l = 0, u = 0; 3338 switch (BuiltinID) { 3339 default: return false; 3340 case AArch64::BI__builtin_arm_dmb: 3341 case AArch64::BI__builtin_arm_dsb: 3342 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; 3343 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; 3344 } 3345 3346 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); 3347} 3348 3349static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) { 3350 if (Arg->getType()->getAsPlaceholderType()) 3351 return false; 3352 3353 // The first argument needs to be a record field access. 3354 // If it is an array element access, we delay decision 3355 // to BPF backend to check whether the access is a 3356 // field access or not. 3357 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField || 3358 isa<MemberExpr>(Arg->IgnoreParens()) || 3359 isa<ArraySubscriptExpr>(Arg->IgnoreParens())); 3360} 3361 3362static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) { 3363 QualType ArgType = Arg->getType(); 3364 if (ArgType->getAsPlaceholderType()) 3365 return false; 3366 3367 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type 3368 // format: 3369 // 1. __builtin_preserve_type_info(*(<type> *)0, flag); 3370 // 2. 
<type> var; 3371 // __builtin_preserve_type_info(var, flag); 3372 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) && 3373 !isa<UnaryOperator>(Arg->IgnoreParens())) 3374 return false; 3375 3376 // Typedef type. 3377 if (ArgType->getAs<TypedefType>()) 3378 return true; 3379 3380 // Record type or Enum type. 3381 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3382 if (const auto *RT = Ty->getAs<RecordType>()) { 3383 if (!RT->getDecl()->getDeclName().isEmpty()) 3384 return true; 3385 } else if (const auto *ET = Ty->getAs<EnumType>()) { 3386 if (!ET->getDecl()->getDeclName().isEmpty()) 3387 return true; 3388 } 3389 3390 return false; 3391} 3392 3393static bool isValidBPFPreserveEnumValueArg(Expr *Arg) { 3394 QualType ArgType = Arg->getType(); 3395 if (ArgType->getAsPlaceholderType()) 3396 return false; 3397 3398 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type 3399 // format: 3400 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>, 3401 // flag); 3402 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens()); 3403 if (!UO) 3404 return false; 3405 3406 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr()); 3407 if (!CE) 3408 return false; 3409 if (CE->getCastKind() != CK_IntegralToPointer && 3410 CE->getCastKind() != CK_NullToPointer) 3411 return false; 3412 3413 // The integer must be from an EnumConstantDecl. 3414 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr()); 3415 if (!DR) 3416 return false; 3417 3418 const EnumConstantDecl *Enumerator = 3419 dyn_cast<EnumConstantDecl>(DR->getDecl()); 3420 if (!Enumerator) 3421 return false; 3422 3423 // The type must be EnumType. 3424 const Type *Ty = ArgType->getUnqualifiedDesugaredType(); 3425 const auto *ET = Ty->getAs<EnumType>(); 3426 if (!ET) 3427 return false; 3428 3429 // The enum value must be supported. 
3430 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); 3431} 3432 3433bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, 3434 CallExpr *TheCall) { 3435 assert((BuiltinID == BPF::BI__builtin_preserve_field_info || 3436 BuiltinID == BPF::BI__builtin_btf_type_id || 3437 BuiltinID == BPF::BI__builtin_preserve_type_info || 3438 BuiltinID == BPF::BI__builtin_preserve_enum_value) && 3439 "unexpected BPF builtin"); 3440 3441 if (checkArgCount(*this, TheCall, 2)) 3442 return true; 3443 3444 // The second argument needs to be a constant int 3445 Expr *Arg = TheCall->getArg(1); 3446 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context); 3447 diag::kind kind; 3448 if (!Value) { 3449 if (BuiltinID == BPF::BI__builtin_preserve_field_info) 3450 kind = diag::err_preserve_field_info_not_const; 3451 else if (BuiltinID == BPF::BI__builtin_btf_type_id) 3452 kind = diag::err_btf_type_id_not_const; 3453 else if (BuiltinID == BPF::BI__builtin_preserve_type_info) 3454 kind = diag::err_preserve_type_info_not_const; 3455 else 3456 kind = diag::err_preserve_enum_value_not_const; 3457 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange(); 3458 return true; 3459 } 3460 3461 // The first argument 3462 Arg = TheCall->getArg(0); 3463 bool InvalidArg = false; 3464 bool ReturnUnsignedInt = true; 3465 if (BuiltinID == BPF::BI__builtin_preserve_field_info) { 3466 if (!isValidBPFPreserveFieldInfoArg(Arg)) { 3467 InvalidArg = true; 3468 kind = diag::err_preserve_field_info_not_field; 3469 } 3470 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) { 3471 if (!isValidBPFPreserveTypeInfoArg(Arg)) { 3472 InvalidArg = true; 3473 kind = diag::err_preserve_type_info_invalid; 3474 } 3475 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) { 3476 if (!isValidBPFPreserveEnumValueArg(Arg)) { 3477 InvalidArg = true; 3478 kind = diag::err_preserve_enum_value_invalid; 3479 } 3480 ReturnUnsignedInt = false; 3481 } else if (BuiltinID == 
BPF::BI__builtin_btf_type_id) { 3482 ReturnUnsignedInt = false; 3483 } 3484 3485 if (InvalidArg) { 3486 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange(); 3487 return true; 3488 } 3489 3490 if (ReturnUnsignedInt) 3491 TheCall->setType(Context.UnsignedIntTy); 3492 else 3493 TheCall->setType(Context.UnsignedLongTy); 3494 return false; 3495} 3496 3497bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { 3498 struct ArgInfo { 3499 uint8_t OpNum; 3500 bool IsSigned; 3501 uint8_t BitWidth; 3502 uint8_t Align; 3503 }; 3504 struct BuiltinInfo { 3505 unsigned BuiltinID; 3506 ArgInfo Infos[2]; 3507 }; 3508 3509 static BuiltinInfo Infos[] = { 3510 { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, 3511 { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, 3512 { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, 3513 { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, 3514 { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, 3515 { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, 3516 { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, 3517 { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, 3518 { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, 3519 { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, 3520 { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, 3521 3522 { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, 3523 { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, 3524 { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, 3525 { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, 3526 { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, 3527 { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, 3528 { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, 3529 { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, 3530 { 
Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, 3531 { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, 3532 { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, 3533 3534 { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, 3535 { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, 3536 { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, 3537 { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, 3538 { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, 3539 { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, 3540 { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, 3541 { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, 3542 { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, 3543 { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, 3544 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, 3545 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, 3546 { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, 3547 { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, 3548 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, 3549 { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, 3550 { Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, 3551 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, 3552 { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, 3553 { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, 3554 { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, 3555 { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, 3556 { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, 3557 { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, 3558 { 
Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, 3559 { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, 3560 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, 3561 { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, 3562 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, 3563 { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, 3564 { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, 3565 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, 3566 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, 3567 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, 3568 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, 3569 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, 3570 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, 3571 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, 3572 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, 3573 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, 3574 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, 3575 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, 3576 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, 3577 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, 3578 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, 3579 { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, 3580 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, 3581 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, 3582 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, 3583 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, 3584 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, 
false, 6, 0 }} }, 3585 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, 3586 {{ 1, false, 6, 0 }} }, 3587 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, 3588 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, 3589 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, 3590 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, 3591 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, 3592 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, 3593 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, 3594 {{ 1, false, 5, 0 }} }, 3595 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, 3596 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, 3597 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, 3598 { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, 3599 { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, 3600 { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, 3601 { 2, false, 5, 0 }} }, 3602 { Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, 3603 { 2, false, 6, 0 }} }, 3604 { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, 3605 { 3, false, 5, 0 }} }, 3606 { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, 3607 { 3, false, 6, 0 }} }, 3608 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, 3609 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, 3610 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, 3611 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, 3612 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, 3613 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, 3614 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, 3615 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 
2, false, 5, 0 }} }, 3616 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, 3617 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, 3618 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, 3619 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, 3620 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, 3621 { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, 3622 { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, 3623 { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, 3624 {{ 2, false, 4, 0 }, 3625 { 3, false, 5, 0 }} }, 3626 { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, 3627 {{ 2, false, 4, 0 }, 3628 { 3, false, 5, 0 }} }, 3629 { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, 3630 {{ 2, false, 4, 0 }, 3631 { 3, false, 5, 0 }} }, 3632 { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, 3633 {{ 2, false, 4, 0 }, 3634 { 3, false, 5, 0 }} }, 3635 { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, 3636 { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, 3637 { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, 3638 { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, 3639 { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, 3640 { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, 3641 { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, 3642 { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, 3643 { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, 3644 { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, 3645 { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, 3646 { 2, false, 5, 0 }} }, 3647 { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, 3648 { 2, false, 6, 0 }} }, 3649 { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 
0 }} }, 3650 { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, 3651 { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, 3652 { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, 3653 { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, 3654 { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, 3655 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, 3656 { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, 3657 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, 3658 {{ 1, false, 4, 0 }} }, 3659 { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, 3660 { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, 3661 {{ 1, false, 4, 0 }} }, 3662 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 3663 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, 3664 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, 3665 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, 3666 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, 3667 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, 3668 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, 3669 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, 3670 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, 3671 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, 3672 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, 3673 { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, 3674 { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, 3675 { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, 3676 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, 3677 { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} 
}, 3678 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, 3679 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, 3680 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, 3681 { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, 3682 {{ 3, false, 1, 0 }} }, 3683 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, 3684 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, 3685 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, 3686 { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, 3687 {{ 3, false, 1, 0 }} }, 3688 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, 3689 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, 3690 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, 3691 { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, 3692 {{ 3, false, 1, 0 }} }, 3693 3694 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, {{ 2, false, 2, 0 }} }, 3695 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B, 3696 {{ 2, false, 2, 0 }} }, 3697 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx, 3698 {{ 3, false, 2, 0 }} }, 3699 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B, 3700 {{ 3, false, 2, 0 }} }, 3701 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, {{ 2, false, 2, 0 }} }, 3702 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B, 3703 {{ 2, false, 2, 0 }} }, 3704 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx, 3705 {{ 3, false, 2, 0 }} }, 3706 { Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, 3707 {{ 3, false, 2, 0 }} }, 3708 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, {{ 2, false, 3, 0 }} }, 3709 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, {{ 2, false, 3, 0 }} }, 3710 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, {{ 3, false, 3, 0 }} }, 3711 { Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B, 3712 {{ 3, false, 3, 0 }} }, 3713 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, 
{{ 2, false, 3, 0 }} }, 3714 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, {{ 2, false, 3, 0 }} }, 3715 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, {{ 3, false, 3, 0 }} }, 3716 { Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B, 3717 {{ 3, false, 3, 0 }} }, 3718 }; 3719 3720 // Use a dynamically initialized static to sort the table exactly once on 3721 // first run. 3722 static const bool SortOnce = 3723 (llvm::sort(Infos, 3724 [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { 3725 return LHS.BuiltinID < RHS.BuiltinID; 3726 }), 3727 true); 3728 (void)SortOnce; 3729 3730 const BuiltinInfo *F = llvm::partition_point( 3731 Infos, [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; }); 3732 if (F == std::end(Infos) || F->BuiltinID != BuiltinID) 3733 return false; 3734 3735 bool Error = false; 3736 3737 for (const ArgInfo &A : F->Infos) { 3738 // Ignore empty ArgInfo elements. 3739 if (A.BitWidth == 0) 3740 continue; 3741 3742 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0; 3743 int32_t Max = (1 << (A.IsSigned ? 
A.BitWidth - 1 : A.BitWidth)) - 1; 3744 if (!A.Align) { 3745 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3746 } else { 3747 unsigned M = 1 << A.Align; 3748 Min *= M; 3749 Max *= M; 3750 Error |= SemaBuiltinConstantArgRange(TheCall, A.OpNum, Min, Max); 3751 Error |= SemaBuiltinConstantArgMultiple(TheCall, A.OpNum, M); 3752 } 3753 } 3754 return Error; 3755} 3756 3757bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, 3758 CallExpr *TheCall) { 3759 return CheckHexagonBuiltinArgument(BuiltinID, TheCall); 3760} 3761 3762bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI, 3763 unsigned BuiltinID, 3764 CallExpr *TheCall) { 3765 switch (BuiltinID) { 3766 default: 3767 break; 3768 case LoongArch::BI__builtin_loongarch_cacop_d: 3769 if (!TI.hasFeature("64bit")) 3770 return Diag(TheCall->getBeginLoc(), 3771 diag::err_loongarch_builtin_requires_la64) 3772 << TheCall->getSourceRange(); 3773 LLVM_FALLTHROUGH; 3774 case LoongArch::BI__builtin_loongarch_cacop_w: { 3775 if (BuiltinID == LoongArch::BI__builtin_loongarch_cacop_w && 3776 !TI.hasFeature("32bit")) 3777 return Diag(TheCall->getBeginLoc(), 3778 diag::err_loongarch_builtin_requires_la32) 3779 << TheCall->getSourceRange(); 3780 SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(5)); 3781 SemaBuiltinConstantArgRange(TheCall, 2, llvm::minIntN(12), 3782 llvm::maxIntN(12)); 3783 break; 3784 } 3785 case LoongArch::BI__builtin_loongarch_crc_w_b_w: 3786 case LoongArch::BI__builtin_loongarch_crc_w_h_w: 3787 case LoongArch::BI__builtin_loongarch_crc_w_w_w: 3788 case LoongArch::BI__builtin_loongarch_crc_w_d_w: 3789 case LoongArch::BI__builtin_loongarch_crcc_w_b_w: 3790 case LoongArch::BI__builtin_loongarch_crcc_w_h_w: 3791 case LoongArch::BI__builtin_loongarch_crcc_w_w_w: 3792 case LoongArch::BI__builtin_loongarch_crcc_w_d_w: 3793 case LoongArch::BI__builtin_loongarch_iocsrrd_d: 3794 case LoongArch::BI__builtin_loongarch_iocsrwr_d: 3795 case 
LoongArch::BI__builtin_loongarch_asrtle_d:
  case LoongArch::BI__builtin_loongarch_asrtgt_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    break;
  case LoongArch::BI__builtin_loongarch_break:
  case LoongArch::BI__builtin_loongarch_dbar:
  case LoongArch::BI__builtin_loongarch_ibar:
  case LoongArch::BI__builtin_loongarch_syscall:
    // Check if immediate is in [0, 32767].
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 32767);
  // CSR builtins: the CSR number operand must be in [0, 16383]; the _d
  // variants additionally require a 64-bit target.
  case LoongArch::BI__builtin_loongarch_csrrd_w:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_w:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_w:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrrd_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrwr_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 16383);
  case LoongArch::BI__builtin_loongarch_csrxchg_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 16383);
  case LoongArch::BI__builtin_loongarch_lddir_d:
  case LoongArch::BI__builtin_loongarch_ldpte_d:
    if (!TI.hasFeature("64bit"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_loongarch_builtin_requires_la64)
             << TheCall->getSourceRange();
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
  case LoongArch::BI__builtin_loongarch_movfcsr2gr:
  case LoongArch::BI__builtin_loongarch_movgr2fcsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, llvm::maxUIntN(2));
  }

  return false;
}

// Check a MIPS builtin call: first that the required CPU features are
// enabled, then that any immediate operands are in range.
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
                                        unsigned BuiltinID, CallExpr *TheCall) {
  return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
         CheckMipsBuiltinArgument(BuiltinID, TheCall);
}

// Diagnose use of a MIPS builtin whose feature group (DSP, DSPr2, or MSA,
// determined by the builtin-ID range) is not enabled on the target.
// Returns true (after emitting a diagnostic) on error.
bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
                               CallExpr *TheCall) {

  if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_lwx) {
    if (!TI.hasFeature("dsp"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
  }

  if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
    if (!TI.hasFeature("dspr2"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_mips_builtin_requires_dspr2);
  }

  if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
      BuiltinID <= Mips::BI__builtin_msa_xori_b) {
    if (!TI.hasFeature("msa"))
      return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
  }

  return false;
}

// CheckMipsBuiltinArgument - Checks the constant value passed to the
// intrinsic is correct. The switch statement is ordered by DSP, MSA. The
// ordering for DSP is unspecified. MSA is ordered by the data format used
// by the underlying instruction i.e., df/m, df/n and then by size.
//
// FIXME: The size tests here should instead be tablegen'd along with the
// definitions from include/clang/Basic/BuiltinsMips.def.
// FIXME: GCC is strict on signedness for some of these intrinsics, we should
// be too.
bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
  // i = index of the checked argument, [l, u] = inclusive valid range,
  // m = required multiple (0 means no multiple-of constraint).
  unsigned i = 0, l = 0, u = 0, m = 0;
  switch (BuiltinID) {
  default: return false;
  case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
  case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
  case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
  case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
  // MSA intrinsics. Instructions (which the intrinsics maps to) which use the
  // df/m field.
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_bclri_b:
  case Mips::BI__builtin_msa_bnegi_b:
  case Mips::BI__builtin_msa_bseti_b:
  case Mips::BI__builtin_msa_sat_s_b:
  case Mips::BI__builtin_msa_sat_u_b:
  case Mips::BI__builtin_msa_slli_b:
  case Mips::BI__builtin_msa_srai_b:
  case Mips::BI__builtin_msa_srari_b:
  case Mips::BI__builtin_msa_srli_b:
  case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_binsli_b:
  case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_bclri_h:
  case Mips::BI__builtin_msa_bnegi_h:
  case Mips::BI__builtin_msa_bseti_h:
  case Mips::BI__builtin_msa_sat_s_h:
  case Mips::BI__builtin_msa_sat_u_h:
  case Mips::BI__builtin_msa_slli_h:
  case Mips::BI__builtin_msa_srai_h:
  case Mips::BI__builtin_msa_srari_h:
  case Mips::BI__builtin_msa_srli_h:
  case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_binsli_h:
  case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 5 bit immediate.
  // The first block of intrinsics actually have an unsigned 5 bit field,
  // not a df/n field.
  case Mips::BI__builtin_msa_cfcmsa:
  case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_clei_u_b:
  case Mips::BI__builtin_msa_clei_u_h:
  case Mips::BI__builtin_msa_clei_u_w:
  case Mips::BI__builtin_msa_clei_u_d:
  case Mips::BI__builtin_msa_clti_u_b:
  case Mips::BI__builtin_msa_clti_u_h:
  case Mips::BI__builtin_msa_clti_u_w:
  case Mips::BI__builtin_msa_clti_u_d:
  case Mips::BI__builtin_msa_maxi_u_b:
  case Mips::BI__builtin_msa_maxi_u_h:
  case Mips::BI__builtin_msa_maxi_u_w:
  case Mips::BI__builtin_msa_maxi_u_d:
  case Mips::BI__builtin_msa_mini_u_b:
  case Mips::BI__builtin_msa_mini_u_h:
  case Mips::BI__builtin_msa_mini_u_w:
  case Mips::BI__builtin_msa_mini_u_d:
  case Mips::BI__builtin_msa_addvi_b:
  case Mips::BI__builtin_msa_addvi_h:
  case Mips::BI__builtin_msa_addvi_w:
  case Mips::BI__builtin_msa_addvi_d:
  case Mips::BI__builtin_msa_bclri_w:
  case Mips::BI__builtin_msa_bnegi_w:
  case Mips::BI__builtin_msa_bseti_w:
  case Mips::BI__builtin_msa_sat_s_w:
  case Mips::BI__builtin_msa_sat_u_w:
  case Mips::BI__builtin_msa_slli_w:
  case Mips::BI__builtin_msa_srai_w:
  case Mips::BI__builtin_msa_srari_w:
  case Mips::BI__builtin_msa_srli_w:
  case Mips::BI__builtin_msa_srlri_w:
  case Mips::BI__builtin_msa_subvi_b:
  case Mips::BI__builtin_msa_subvi_h:
  case Mips::BI__builtin_msa_subvi_w:
  case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
  case Mips::BI__builtin_msa_binsli_w:
  case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
  // These intrinsics take an unsigned 6 bit immediate.
  case Mips::BI__builtin_msa_bclri_d:
  case Mips::BI__builtin_msa_bnegi_d:
  case Mips::BI__builtin_msa_bseti_d:
  case Mips::BI__builtin_msa_sat_s_d:
  case Mips::BI__builtin_msa_sat_u_d:
  case Mips::BI__builtin_msa_slli_d:
  case Mips::BI__builtin_msa_srai_d:
  case Mips::BI__builtin_msa_srari_d:
  case Mips::BI__builtin_msa_srli_d:
  case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
  case Mips::BI__builtin_msa_binsli_d:
  case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
  // These intrinsics take a signed 5 bit immediate.
  case Mips::BI__builtin_msa_ceqi_b:
  case Mips::BI__builtin_msa_ceqi_h:
  case Mips::BI__builtin_msa_ceqi_w:
  case Mips::BI__builtin_msa_ceqi_d:
  case Mips::BI__builtin_msa_clti_s_b:
  case Mips::BI__builtin_msa_clti_s_h:
  case Mips::BI__builtin_msa_clti_s_w:
  case Mips::BI__builtin_msa_clti_s_d:
  case Mips::BI__builtin_msa_clei_s_b:
  case Mips::BI__builtin_msa_clei_s_h:
  case Mips::BI__builtin_msa_clei_s_w:
  case Mips::BI__builtin_msa_clei_s_d:
  case Mips::BI__builtin_msa_maxi_s_b:
  case Mips::BI__builtin_msa_maxi_s_h:
  case Mips::BI__builtin_msa_maxi_s_w:
  case Mips::BI__builtin_msa_maxi_s_d:
  case Mips::BI__builtin_msa_mini_s_b:
  case Mips::BI__builtin_msa_mini_s_h:
  case Mips::BI__builtin_msa_mini_s_w:
  case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
  // These intrinsics take an unsigned 8 bit immediate.
  case Mips::BI__builtin_msa_andi_b:
  case Mips::BI__builtin_msa_nori_b:
  case Mips::BI__builtin_msa_ori_b:
  case Mips::BI__builtin_msa_shf_b:
  case Mips::BI__builtin_msa_shf_h:
  case Mips::BI__builtin_msa_shf_w:
  case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
  case Mips::BI__builtin_msa_bseli_b:
  case Mips::BI__builtin_msa_bmnzi_b:
  case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
  // df/n format
  // These intrinsics take an unsigned 4 bit immediate.
  case Mips::BI__builtin_msa_copy_s_b:
  case Mips::BI__builtin_msa_copy_u_b:
  case Mips::BI__builtin_msa_insve_b:
  case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
  case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
  // These intrinsics take an unsigned 3 bit immediate.
  case Mips::BI__builtin_msa_copy_s_h:
  case Mips::BI__builtin_msa_copy_u_h:
  case Mips::BI__builtin_msa_insve_h:
  case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
  case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
  // These intrinsics take an unsigned 2 bit immediate.
  case Mips::BI__builtin_msa_copy_s_w:
  case Mips::BI__builtin_msa_copy_u_w:
  case Mips::BI__builtin_msa_insve_w:
  case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
  case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
  // These intrinsics take an unsigned 1 bit immediate.
  case Mips::BI__builtin_msa_copy_s_d:
  case Mips::BI__builtin_msa_copy_u_d:
  case Mips::BI__builtin_msa_insve_d:
  case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
  case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
  // Memory offsets and immediate loads.
  // These intrinsics take a signed 10 bit immediate.
  case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
  case Mips::BI__builtin_msa_ldi_h:
  case Mips::BI__builtin_msa_ldi_w:
  case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
  // Load/store offsets must also be a multiple of the element size (m).
  case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
  case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
  case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
  case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
  case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
  }

  if (!m)
    return SemaBuiltinConstantArgRange(TheCall, i, l, u);

  return SemaBuiltinConstantArgRange(TheCall, i, l, u) ||
         SemaBuiltinConstantArgMultiple(TheCall, i, m);
}

/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
/// advancing the pointer over the consumed characters. The decoded type is
/// returned. If the decoded type represents a constant integer with a
/// constraint on its value then Mask is set to that value. The type descriptors
/// used in Str are specific to PPC MMA builtins and are documented in the file
/// defining the PPC builtins.
static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
                                        unsigned &Mask) {
  bool RequireICE = false;
  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  switch (*Str++) {
  case 'V':
    // 'V': a 16-element vector of unsigned char (AltiVec vector).
    return Context.getVectorType(Context.UnsignedCharTy, 16,
                                 VectorType::VectorKind::AltiVecVector);
  case 'i': {
    // 'i<n>': an int constrained to the constant value recorded in Mask.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing constant parameter constraint");
    Str = End;
    Mask = size;
    return Context.IntTy;
  }
  case 'W': {
    // 'W<n>': a PPC MMA vector type of <n> bits, optionally followed by
    // '*' (pointer) and/or 'C' (const) qualifiers.
    char *End;
    unsigned size = strtoul(Str, &End, 10);
    assert(End != Str && "Missing PowerPC MMA type size");
    Str = End;
    QualType Type;
    switch (size) {
#define PPC_VECTOR_TYPE(typeName, Id, size) \
  case size: Type = Context.Id##Ty; break;
#include "clang/Basic/PPCTypes.def"
    default: llvm_unreachable("Invalid PowerPC MMA vector type");
    }
    bool CheckVectorArgs = false;
    while (!CheckVectorArgs) {
      switch (*Str++) {
      case '*':
        Type = Context.getPointerType(Type);
        break;
      case 'C':
        Type = Type.withConst();
        break;
      default:
        // Not a qualifier: back up and stop consuming characters.
        CheckVectorArgs = true;
        --Str;
        break;
      }
    }
    return Type;
  }
  default:
    // Anything else is a generic builtin type descriptor.
    return Context.DecodeTypeStr(--Str, Context, Error, RequireICE, true);
  }
}

static bool isPPC_64Builtin(unsigned BuiltinID) {
  // These builtins only work on PPC 64bit targets.
  switch (BuiltinID) {
  case PPC::BI__builtin_divde:
  case PPC::BI__builtin_divdeu:
  case PPC::BI__builtin_bpermd:
  case PPC::BI__builtin_pdepd:
  case PPC::BI__builtin_pextd:
  case PPC::BI__builtin_ppc_ldarx:
  case PPC::BI__builtin_ppc_stdcx:
  case PPC::BI__builtin_ppc_tdw:
  case PPC::BI__builtin_ppc_trapd:
  case PPC::BI__builtin_ppc_cmpeqb:
  case PPC::BI__builtin_ppc_setb:
  case PPC::BI__builtin_ppc_mulhd:
  case PPC::BI__builtin_ppc_mulhdu:
  case PPC::BI__builtin_ppc_maddhd:
  case PPC::BI__builtin_ppc_maddhdu:
  case PPC::BI__builtin_ppc_maddld:
  case PPC::BI__builtin_ppc_load8r:
  case PPC::BI__builtin_ppc_store8r:
  case PPC::BI__builtin_ppc_insert_exp:
  case PPC::BI__builtin_ppc_extract_sig:
  case PPC::BI__builtin_ppc_addex:
  case PPC::BI__builtin_darn:
  case PPC::BI__builtin_darn_raw:
  case PPC::BI__builtin_ppc_compare_and_swaplp:
  case PPC::BI__builtin_ppc_fetch_and_addlp:
  case PPC::BI__builtin_ppc_fetch_and_andlp:
  case PPC::BI__builtin_ppc_fetch_and_orlp:
  case PPC::BI__builtin_ppc_fetch_and_swaplp:
    return true;
  }
  return false;
}

// Emits DiagID (with the optional DiagArg inserted) and returns true when the
// target does not have FeatureToCheck; returns false (no diagnostic) when the
// feature is available.
static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
                             StringRef FeatureToCheck, unsigned DiagID,
                             StringRef DiagArg = "") {
  if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
    return false;

  if (DiagArg.empty())
    S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
  else
    S.Diag(TheCall->getBeginLoc(), DiagID)
        << DiagArg << TheCall->getSourceRange();

  return true;
}

/// Returns true if the argument consists of one contiguous run of 1s with any
/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
/// since all 1s are not contiguous.
bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;
  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
  // A wrapped run of 1s is a shifted mask of the complement.
  if (Result.isShiftedMask() || (~Result).isShiftedMask())
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_contiguous_bit_field)
         << ArgNum << Arg->getSourceRange();
}

bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  unsigned i = 0, l = 0, u = 0;
  // The target is 64-bit iff its intptr_t is 64 bits wide.
  bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
  llvm::APSInt Result;

  // Reject 64-bit-only builtins on 32-bit targets up front.
  if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
    return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
           << TheCall->getSourceRange();

  switch (BuiltinID) {
  default: return false;
  case PPC::BI__builtin_altivec_crypto_vshasigmaw:
  case PPC::BI__builtin_altivec_crypto_vshasigmad:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case PPC::BI__builtin_altivec_dss:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
  case PPC::BI__builtin_tbegin:
  case PPC::BI__builtin_tend:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tsr:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
           SemaFeatureCheck(*this, TheCall, "htm",
                            diag::err_ppc_builtin_requires_htm);
  case PPC::BI__builtin_tabortwc:
  case PPC::BI__builtin_tabortdc:
    return
SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4225 SemaFeatureCheck(*this, TheCall, "htm", 4226 diag::err_ppc_builtin_requires_htm); 4227 case PPC::BI__builtin_tabortwci: 4228 case PPC::BI__builtin_tabortdci: 4229 return SemaFeatureCheck(*this, TheCall, "htm", 4230 diag::err_ppc_builtin_requires_htm) || 4231 (SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || 4232 SemaBuiltinConstantArgRange(TheCall, 2, 0, 31)); 4233 case PPC::BI__builtin_tabort: 4234 case PPC::BI__builtin_tcheck: 4235 case PPC::BI__builtin_treclaim: 4236 case PPC::BI__builtin_trechkpt: 4237 case PPC::BI__builtin_tendall: 4238 case PPC::BI__builtin_tresume: 4239 case PPC::BI__builtin_tsuspend: 4240 case PPC::BI__builtin_get_texasr: 4241 case PPC::BI__builtin_get_texasru: 4242 case PPC::BI__builtin_get_tfhar: 4243 case PPC::BI__builtin_get_tfiar: 4244 case PPC::BI__builtin_set_texasr: 4245 case PPC::BI__builtin_set_texasru: 4246 case PPC::BI__builtin_set_tfhar: 4247 case PPC::BI__builtin_set_tfiar: 4248 case PPC::BI__builtin_ttest: 4249 return SemaFeatureCheck(*this, TheCall, "htm", 4250 diag::err_ppc_builtin_requires_htm); 4251 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05', 4252 // __builtin_(un)pack_longdouble are available only if long double uses IBM 4253 // extended double representation. 
4254 case PPC::BI__builtin_unpack_longdouble: 4255 if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 1)) 4256 return true; 4257 [[fallthrough]]; 4258 case PPC::BI__builtin_pack_longdouble: 4259 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble()) 4260 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi) 4261 << "ibmlongdouble"; 4262 return false; 4263 case PPC::BI__builtin_altivec_dst: 4264 case PPC::BI__builtin_altivec_dstt: 4265 case PPC::BI__builtin_altivec_dstst: 4266 case PPC::BI__builtin_altivec_dststt: 4267 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3); 4268 case PPC::BI__builtin_vsx_xxpermdi: 4269 case PPC::BI__builtin_vsx_xxsldwi: 4270 return SemaBuiltinVSX(TheCall); 4271 case PPC::BI__builtin_divwe: 4272 case PPC::BI__builtin_divweu: 4273 case PPC::BI__builtin_divde: 4274 case PPC::BI__builtin_divdeu: 4275 return SemaFeatureCheck(*this, TheCall, "extdiv", 4276 diag::err_ppc_builtin_only_on_arch, "7"); 4277 case PPC::BI__builtin_bpermd: 4278 return SemaFeatureCheck(*this, TheCall, "bpermd", 4279 diag::err_ppc_builtin_only_on_arch, "7"); 4280 case PPC::BI__builtin_unpack_vector_int128: 4281 return SemaFeatureCheck(*this, TheCall, "vsx", 4282 diag::err_ppc_builtin_only_on_arch, "7") || 4283 SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4284 case PPC::BI__builtin_pack_vector_int128: 4285 return SemaFeatureCheck(*this, TheCall, "vsx", 4286 diag::err_ppc_builtin_only_on_arch, "7"); 4287 case PPC::BI__builtin_pdepd: 4288 case PPC::BI__builtin_pextd: 4289 return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", 4290 diag::err_ppc_builtin_only_on_arch, "10"); 4291 case PPC::BI__builtin_altivec_vgnb: 4292 return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); 4293 case PPC::BI__builtin_vsx_xxeval: 4294 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); 4295 case PPC::BI__builtin_altivec_vsldbi: 4296 return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4297 case PPC::BI__builtin_altivec_vsrdbi: 4298 return 
SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); 4299 case PPC::BI__builtin_vsx_xxpermx: 4300 return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); 4301 case PPC::BI__builtin_ppc_tw: 4302 case PPC::BI__builtin_ppc_tdw: 4303 return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); 4304 case PPC::BI__builtin_ppc_cmpeqb: 4305 case PPC::BI__builtin_ppc_setb: 4306 case PPC::BI__builtin_ppc_maddhd: 4307 case PPC::BI__builtin_ppc_maddhdu: 4308 case PPC::BI__builtin_ppc_maddld: 4309 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4310 diag::err_ppc_builtin_only_on_arch, "9"); 4311 case PPC::BI__builtin_ppc_cmprb: 4312 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4313 diag::err_ppc_builtin_only_on_arch, "9") || 4314 SemaBuiltinConstantArgRange(TheCall, 0, 0, 1); 4315 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must 4316 // be a constant that represents a contiguous bit field. 4317 case PPC::BI__builtin_ppc_rlwnm: 4318 return SemaValueIsRunOfOnes(TheCall, 2); 4319 case PPC::BI__builtin_ppc_rlwimi: 4320 case PPC::BI__builtin_ppc_rldimi: 4321 return SemaBuiltinConstantArg(TheCall, 2, Result) || 4322 SemaValueIsRunOfOnes(TheCall, 3); 4323 case PPC::BI__builtin_ppc_extract_exp: 4324 case PPC::BI__builtin_ppc_extract_sig: 4325 case PPC::BI__builtin_ppc_insert_exp: 4326 return SemaFeatureCheck(*this, TheCall, "power9-vector", 4327 diag::err_ppc_builtin_only_on_arch, "9"); 4328 case PPC::BI__builtin_ppc_addex: { 4329 if (SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4330 diag::err_ppc_builtin_only_on_arch, "9") || 4331 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3)) 4332 return true; 4333 // Output warning for reserved values 1 to 3. 
4334 int ArgValue = 4335 TheCall->getArg(2)->getIntegerConstantExpr(Context)->getSExtValue(); 4336 if (ArgValue != 0) 4337 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour) 4338 << ArgValue; 4339 return false; 4340 } 4341 case PPC::BI__builtin_ppc_mtfsb0: 4342 case PPC::BI__builtin_ppc_mtfsb1: 4343 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31); 4344 case PPC::BI__builtin_ppc_mtfsf: 4345 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255); 4346 case PPC::BI__builtin_ppc_mtfsfi: 4347 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) || 4348 SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); 4349 case PPC::BI__builtin_ppc_alignx: 4350 return SemaBuiltinConstantArgPower2(TheCall, 0); 4351 case PPC::BI__builtin_ppc_rdlam: 4352 return SemaValueIsRunOfOnes(TheCall, 2); 4353 case PPC::BI__builtin_ppc_icbt: 4354 case PPC::BI__builtin_ppc_sthcx: 4355 case PPC::BI__builtin_ppc_stbcx: 4356 case PPC::BI__builtin_ppc_lharx: 4357 case PPC::BI__builtin_ppc_lbarx: 4358 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4359 diag::err_ppc_builtin_only_on_arch, "8"); 4360 case PPC::BI__builtin_vsx_ldrmb: 4361 case PPC::BI__builtin_vsx_strmb: 4362 return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions", 4363 diag::err_ppc_builtin_only_on_arch, "8") || 4364 SemaBuiltinConstantArgRange(TheCall, 1, 1, 16); 4365 case PPC::BI__builtin_altivec_vcntmbb: 4366 case PPC::BI__builtin_altivec_vcntmbh: 4367 case PPC::BI__builtin_altivec_vcntmbw: 4368 case PPC::BI__builtin_altivec_vcntmbd: 4369 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1); 4370 case PPC::BI__builtin_darn: 4371 case PPC::BI__builtin_darn_raw: 4372 case PPC::BI__builtin_darn_32: 4373 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4374 diag::err_ppc_builtin_only_on_arch, "9"); 4375 case PPC::BI__builtin_vsx_xxgenpcvbm: 4376 case PPC::BI__builtin_vsx_xxgenpcvhm: 4377 case PPC::BI__builtin_vsx_xxgenpcvwm: 4378 case PPC::BI__builtin_vsx_xxgenpcvdm: 4379 return 
SemaBuiltinConstantArgRange(TheCall, 1, 0, 3); 4380 case PPC::BI__builtin_ppc_compare_exp_uo: 4381 case PPC::BI__builtin_ppc_compare_exp_lt: 4382 case PPC::BI__builtin_ppc_compare_exp_gt: 4383 case PPC::BI__builtin_ppc_compare_exp_eq: 4384 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4385 diag::err_ppc_builtin_only_on_arch, "9") || 4386 SemaFeatureCheck(*this, TheCall, "vsx", 4387 diag::err_ppc_builtin_requires_vsx); 4388 case PPC::BI__builtin_ppc_test_data_class: { 4389 // Check if the first argument of the __builtin_ppc_test_data_class call is 4390 // valid. The argument must be 'float' or 'double' or '__float128'. 4391 QualType ArgType = TheCall->getArg(0)->getType(); 4392 if (ArgType != QualType(Context.FloatTy) && 4393 ArgType != QualType(Context.DoubleTy) && 4394 ArgType != QualType(Context.Float128Ty)) 4395 return Diag(TheCall->getBeginLoc(), 4396 diag::err_ppc_invalid_test_data_class_type); 4397 return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions", 4398 diag::err_ppc_builtin_only_on_arch, "9") || 4399 SemaFeatureCheck(*this, TheCall, "vsx", 4400 diag::err_ppc_builtin_requires_vsx) || 4401 SemaBuiltinConstantArgRange(TheCall, 1, 0, 127); 4402 } 4403 case PPC::BI__builtin_ppc_maxfe: 4404 case PPC::BI__builtin_ppc_minfe: 4405 case PPC::BI__builtin_ppc_maxfl: 4406 case PPC::BI__builtin_ppc_minfl: 4407 case PPC::BI__builtin_ppc_maxfs: 4408 case PPC::BI__builtin_ppc_minfs: { 4409 if (Context.getTargetInfo().getTriple().isOSAIX() && 4410 (BuiltinID == PPC::BI__builtin_ppc_maxfe || 4411 BuiltinID == PPC::BI__builtin_ppc_minfe)) 4412 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type) 4413 << "builtin" << true << 128 << QualType(Context.LongDoubleTy) 4414 << false << Context.getTargetInfo().getTriple().str(); 4415 // Argument type should be exact. 
4416 QualType ArgType = QualType(Context.LongDoubleTy); 4417 if (BuiltinID == PPC::BI__builtin_ppc_maxfl || 4418 BuiltinID == PPC::BI__builtin_ppc_minfl) 4419 ArgType = QualType(Context.DoubleTy); 4420 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs || 4421 BuiltinID == PPC::BI__builtin_ppc_minfs) 4422 ArgType = QualType(Context.FloatTy); 4423 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I) 4424 if (TheCall->getArg(I)->getType() != ArgType) 4425 return Diag(TheCall->getBeginLoc(), 4426 diag::err_typecheck_convert_incompatible) 4427 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0; 4428 return false; 4429 } 4430 case PPC::BI__builtin_ppc_load8r: 4431 case PPC::BI__builtin_ppc_store8r: 4432 return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions", 4433 diag::err_ppc_builtin_only_on_arch, "7"); 4434#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \ 4435 case PPC::BI__builtin_##Name: \ 4436 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types); 4437#include "clang/Basic/BuiltinsPPC.def" 4438 } 4439 return SemaBuiltinConstantArgRange(TheCall, i, l, u); 4440} 4441 4442// Check if the given type is a non-pointer PPC MMA type. This function is used 4443// in Sema to prevent invalid uses of restricted PPC MMA types. 
/// Diagnose uses of restricted PPC MMA types (except through pointers or
/// arrays, which are allowed). Returns true if a diagnostic was emitted.
bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
  if (Type->isPointerType() || Type->isArrayType())
    return false;

  QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
// Build an "|| CoreType == <each MMA type>" chain from the .def file; the
// leading 'false' anchors the chain so the macro expansion is well-formed.
#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
  if (false
#include "clang/Basic/PPCTypes.def"
     ) {
    Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
    return true;
  }
  return false;
}

/// Validate AMDGCN atomic/fence builtins: the memory-order argument must be an
/// integer constant naming a valid (and, for fence, non-relaxed/non-consume)
/// C ABI atomic ordering, and the sync-scope argument must be a constant
/// expression. Returns true on error (a diagnostic has been emitted).
bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                          CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;
  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of memory ordering as per C11 / C++11's memory model.
  // Only fence needs check. Atomic dec/inc allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    // A fence with relaxed/consume ordering is meaningless.
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that sync scope is a constant literal
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}

/// Check that argument \p ArgNum of \p TheCall is a valid constant RVV LMUL
/// value: 0..3 (LMUL 1/2/4/8) or 5..7 (fractional LMUL 1/8, 1/4, 1/2); 4 is
/// reserved. Returns true on error (a diagnostic has been emitted).
bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  int64_t Val = Result.getSExtValue();
  if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
         << Arg->getSourceRange();
}

/// Perform RISC-V-specific semantic checks on a builtin call: diagnose
/// missing required target features (extensions / RV32 / RV64) with a
/// readable feature list, then range-check immediate arguments of vsetvli,
/// vget/vset and scalar-crypto builtins.
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
                                         unsigned BuiltinID,
                                         CallExpr *TheCall) {
  // CodeGenFunction can also detect this, but this gives a better error
  // message.
  bool FeatureMissing = false;
  SmallVector<StringRef> ReqFeatures;
  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
  // Required features are comma-separated AND-groups; each group is a
  // '|'-separated OR-list of acceptable features.
  Features.split(ReqFeatures, ',');

  // Check if each required feature is included
  for (StringRef F : ReqFeatures) {
    SmallVector<StringRef> ReqOpFeatures;
    F.split(ReqOpFeatures, '|');

    if (llvm::none_of(ReqOpFeatures,
                      [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
      std::string FeatureStrs;
      bool IsExtension = true;
      for (StringRef OF : ReqOpFeatures) {
        // If the feature is 64bit, alter the string so it will print better in
        // the diagnostic.
        if (OF == "64bit") {
          assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
          OF = "RV64";
          IsExtension = false;
        }
        if (OF == "32bit") {
          assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
          OF = "RV32";
          IsExtension = false;
        }

        // Convert features like "zbr" and "experimental-zbr" to "Zbr".
        OF.consume_front("experimental-");
        std::string FeatureStr = OF.str();
        FeatureStr[0] = std::toupper(FeatureStr[0]);
        // Combine strings.
        FeatureStrs += FeatureStrs == "" ? "" : ", ";
        FeatureStrs += "'";
        FeatureStrs += FeatureStr;
        FeatureStrs += "'";
      }
      // Error message
      FeatureMissing = true;
      Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
          << IsExtension
          << TheCall->getSourceRange() << StringRef(FeatureStrs);
    }
  }

  if (FeatureMissing)
    return true;

  switch (BuiltinID) {
  case RISCVVector::BI__builtin_rvv_vsetvli:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
           CheckRISCVLMUL(TheCall, 2);
  case RISCVVector::BI__builtin_rvv_vsetvlimax:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           CheckRISCVLMUL(TheCall, 1);
  case RISCVVector::BI__builtin_rvv_vget_v: {
    // The extraction index must address a whole sub-vector of the source:
    // max index = (source total elements) / (result total elements) - 1.
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex =
        (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
        (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  case RISCVVector::BI__builtin_rvv_vset_v: {
    // Mirror of vget: the inserted sub-vector (arg 2) must fit in the result.
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex =
        (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
        (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
  }
  // Check if byteselect is in [0, 3]
  case RISCV::BI__builtin_riscv_aes32dsi_32:
  case RISCV::BI__builtin_riscv_aes32dsmi_32:
  case RISCV::BI__builtin_riscv_aes32esi_32:
  case RISCV::BI__builtin_riscv_aes32esmi_32:
  case RISCV::BI__builtin_riscv_sm4ks:
  case RISCV::BI__builtin_riscv_sm4ed:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
  // Check if rnum is in [0, 10]
  case RISCV::BI__builtin_riscv_aes64ks1i_64:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
  }

  return false;
}

/// Perform SystemZ-specific semantic checks on a builtin call: diagnose a
/// constant __builtin_tabort code in the reserved range [0, 256), then
/// range-check the immediate operand of each vector builtin.
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
                                           CallExpr *TheCall) {
  if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
    if (std::optional<llvm::APSInt> AbortCode =
            Arg->getIntegerConstantExpr(Context))
      if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
        return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
               << Arg->getSourceRange();
  }

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  unsigned i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default: return false;
  case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_verimb:
  case SystemZ::BI__builtin_s390_verimh:
  case SystemZ::BI__builtin_s390_verimf:
  case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
  case SystemZ::BI__builtin_s390_vfaeb:
  case SystemZ::BI__builtin_s390_vfaeh:
  case SystemZ::BI__builtin_s390_vfaef:
  case SystemZ::BI__builtin_s390_vfaebs:
  case SystemZ::BI__builtin_s390_vfaehs:
  case SystemZ::BI__builtin_s390_vfaefs:
  case SystemZ::BI__builtin_s390_vfaezb:
  case SystemZ::BI__builtin_s390_vfaezh:
  case SystemZ::BI__builtin_s390_vfaezf:
  case SystemZ::BI__builtin_s390_vfaezbs:
  case SystemZ::BI__builtin_s390_vfaezhs:
  case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfisb:
  case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
  case SystemZ::BI__builtin_s390_vftcisb:
  case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
  case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vstrcb:
  case SystemZ::BI__builtin_s390_vstrch:
  case SystemZ::BI__builtin_s390_vstrcf:
  case SystemZ::BI__builtin_s390_vstrczb:
  case SystemZ::BI__builtin_s390_vstrczh:
  case SystemZ::BI__builtin_s390_vstrczf:
  case SystemZ::BI__builtin_s390_vstrcbs:
  case SystemZ::BI__builtin_s390_vstrchs:
  case SystemZ::BI__builtin_s390_vstrcfs:
  case SystemZ::BI__builtin_s390_vstrczbs:
  case SystemZ::BI__builtin_s390_vstrczhs:
  case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vfminsb:
  case SystemZ::BI__builtin_s390_vfmaxsb:
  case SystemZ::BI__builtin_s390_vfmindb:
  case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
  case SystemZ::BI__builtin_s390_vclfnhs:
  case SystemZ::BI__builtin_s390_vclfnls:
  case SystemZ::BI__builtin_s390_vcfn:
  case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
  case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}

/// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *).
/// This checks that the target supports __builtin_cpu_supports and
/// that the string argument is constant and valid.
static bool SemaBuiltinCpuSupports(Sema &S, const TargetInfo &TI,
                                   CallExpr *TheCall) {
  Expr *Arg = TheCall->getArg(0);

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the contents of the string.
  StringRef Feature =
      cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  if (!TI.validateCpuSupports(Feature))
    return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
           << Arg->getSourceRange();
  return false;
}

/// SemaBuiltinCpuIs - Handle __builtin_cpu_is(char *).
/// This checks that the target supports __builtin_cpu_is and
/// that the string argument is constant and valid.
4735static bool SemaBuiltinCpuIs(Sema &S, const TargetInfo &TI, CallExpr *TheCall) { 4736 Expr *Arg = TheCall->getArg(0); 4737 4738 // Check if the argument is a string literal. 4739 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) 4740 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal) 4741 << Arg->getSourceRange(); 4742 4743 // Check the contents of the string. 4744 StringRef Feature = 4745 cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); 4746 if (!TI.validateCpuIs(Feature)) 4747 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is) 4748 << Arg->getSourceRange(); 4749 return false; 4750} 4751 4752// Check if the rounding mode is legal. 4753bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) { 4754 // Indicates if this instruction has rounding control or just SAE. 4755 bool HasRC = false; 4756 4757 unsigned ArgNum = 0; 4758 switch (BuiltinID) { 4759 default: 4760 return false; 4761 case X86::BI__builtin_ia32_vcvttsd2si32: 4762 case X86::BI__builtin_ia32_vcvttsd2si64: 4763 case X86::BI__builtin_ia32_vcvttsd2usi32: 4764 case X86::BI__builtin_ia32_vcvttsd2usi64: 4765 case X86::BI__builtin_ia32_vcvttss2si32: 4766 case X86::BI__builtin_ia32_vcvttss2si64: 4767 case X86::BI__builtin_ia32_vcvttss2usi32: 4768 case X86::BI__builtin_ia32_vcvttss2usi64: 4769 case X86::BI__builtin_ia32_vcvttsh2si32: 4770 case X86::BI__builtin_ia32_vcvttsh2si64: 4771 case X86::BI__builtin_ia32_vcvttsh2usi32: 4772 case X86::BI__builtin_ia32_vcvttsh2usi64: 4773 ArgNum = 1; 4774 break; 4775 case X86::BI__builtin_ia32_maxpd512: 4776 case X86::BI__builtin_ia32_maxps512: 4777 case X86::BI__builtin_ia32_minpd512: 4778 case X86::BI__builtin_ia32_minps512: 4779 case X86::BI__builtin_ia32_maxph512: 4780 case X86::BI__builtin_ia32_minph512: 4781 ArgNum = 2; 4782 break; 4783 case X86::BI__builtin_ia32_vcvtph2pd512_mask: 4784 case X86::BI__builtin_ia32_vcvtph2psx512_mask: 4785 case X86::BI__builtin_ia32_cvtps2pd512_mask: 4786 case 
X86::BI__builtin_ia32_cvttpd2dq512_mask: 4787 case X86::BI__builtin_ia32_cvttpd2qq512_mask: 4788 case X86::BI__builtin_ia32_cvttpd2udq512_mask: 4789 case X86::BI__builtin_ia32_cvttpd2uqq512_mask: 4790 case X86::BI__builtin_ia32_cvttps2dq512_mask: 4791 case X86::BI__builtin_ia32_cvttps2qq512_mask: 4792 case X86::BI__builtin_ia32_cvttps2udq512_mask: 4793 case X86::BI__builtin_ia32_cvttps2uqq512_mask: 4794 case X86::BI__builtin_ia32_vcvttph2w512_mask: 4795 case X86::BI__builtin_ia32_vcvttph2uw512_mask: 4796 case X86::BI__builtin_ia32_vcvttph2dq512_mask: 4797 case X86::BI__builtin_ia32_vcvttph2udq512_mask: 4798 case X86::BI__builtin_ia32_vcvttph2qq512_mask: 4799 case X86::BI__builtin_ia32_vcvttph2uqq512_mask: 4800 case X86::BI__builtin_ia32_exp2pd_mask: 4801 case X86::BI__builtin_ia32_exp2ps_mask: 4802 case X86::BI__builtin_ia32_getexppd512_mask: 4803 case X86::BI__builtin_ia32_getexpps512_mask: 4804 case X86::BI__builtin_ia32_getexpph512_mask: 4805 case X86::BI__builtin_ia32_rcp28pd_mask: 4806 case X86::BI__builtin_ia32_rcp28ps_mask: 4807 case X86::BI__builtin_ia32_rsqrt28pd_mask: 4808 case X86::BI__builtin_ia32_rsqrt28ps_mask: 4809 case X86::BI__builtin_ia32_vcomisd: 4810 case X86::BI__builtin_ia32_vcomiss: 4811 case X86::BI__builtin_ia32_vcomish: 4812 case X86::BI__builtin_ia32_vcvtph2ps512_mask: 4813 ArgNum = 3; 4814 break; 4815 case X86::BI__builtin_ia32_cmppd512_mask: 4816 case X86::BI__builtin_ia32_cmpps512_mask: 4817 case X86::BI__builtin_ia32_cmpsd_mask: 4818 case X86::BI__builtin_ia32_cmpss_mask: 4819 case X86::BI__builtin_ia32_cmpsh_mask: 4820 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask: 4821 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask: 4822 case X86::BI__builtin_ia32_cvtss2sd_round_mask: 4823 case X86::BI__builtin_ia32_getexpsd128_round_mask: 4824 case X86::BI__builtin_ia32_getexpss128_round_mask: 4825 case X86::BI__builtin_ia32_getexpsh128_round_mask: 4826 case X86::BI__builtin_ia32_getmantpd512_mask: 4827 case 
X86::BI__builtin_ia32_getmantps512_mask: 4828 case X86::BI__builtin_ia32_getmantph512_mask: 4829 case X86::BI__builtin_ia32_maxsd_round_mask: 4830 case X86::BI__builtin_ia32_maxss_round_mask: 4831 case X86::BI__builtin_ia32_maxsh_round_mask: 4832 case X86::BI__builtin_ia32_minsd_round_mask: 4833 case X86::BI__builtin_ia32_minss_round_mask: 4834 case X86::BI__builtin_ia32_minsh_round_mask: 4835 case X86::BI__builtin_ia32_rcp28sd_round_mask: 4836 case X86::BI__builtin_ia32_rcp28ss_round_mask: 4837 case X86::BI__builtin_ia32_reducepd512_mask: 4838 case X86::BI__builtin_ia32_reduceps512_mask: 4839 case X86::BI__builtin_ia32_reduceph512_mask: 4840 case X86::BI__builtin_ia32_rndscalepd_mask: 4841 case X86::BI__builtin_ia32_rndscaleps_mask: 4842 case X86::BI__builtin_ia32_rndscaleph_mask: 4843 case X86::BI__builtin_ia32_rsqrt28sd_round_mask: 4844 case X86::BI__builtin_ia32_rsqrt28ss_round_mask: 4845 ArgNum = 4; 4846 break; 4847 case X86::BI__builtin_ia32_fixupimmpd512_mask: 4848 case X86::BI__builtin_ia32_fixupimmpd512_maskz: 4849 case X86::BI__builtin_ia32_fixupimmps512_mask: 4850 case X86::BI__builtin_ia32_fixupimmps512_maskz: 4851 case X86::BI__builtin_ia32_fixupimmsd_mask: 4852 case X86::BI__builtin_ia32_fixupimmsd_maskz: 4853 case X86::BI__builtin_ia32_fixupimmss_mask: 4854 case X86::BI__builtin_ia32_fixupimmss_maskz: 4855 case X86::BI__builtin_ia32_getmantsd_round_mask: 4856 case X86::BI__builtin_ia32_getmantss_round_mask: 4857 case X86::BI__builtin_ia32_getmantsh_round_mask: 4858 case X86::BI__builtin_ia32_rangepd512_mask: 4859 case X86::BI__builtin_ia32_rangeps512_mask: 4860 case X86::BI__builtin_ia32_rangesd128_round_mask: 4861 case X86::BI__builtin_ia32_rangess128_round_mask: 4862 case X86::BI__builtin_ia32_reducesd_mask: 4863 case X86::BI__builtin_ia32_reducess_mask: 4864 case X86::BI__builtin_ia32_reducesh_mask: 4865 case X86::BI__builtin_ia32_rndscalesd_round_mask: 4866 case X86::BI__builtin_ia32_rndscaless_round_mask: 4867 case 
X86::BI__builtin_ia32_rndscalesh_round_mask: 4868 ArgNum = 5; 4869 break; 4870 case X86::BI__builtin_ia32_vcvtsd2si64: 4871 case X86::BI__builtin_ia32_vcvtsd2si32: 4872 case X86::BI__builtin_ia32_vcvtsd2usi32: 4873 case X86::BI__builtin_ia32_vcvtsd2usi64: 4874 case X86::BI__builtin_ia32_vcvtss2si32: 4875 case X86::BI__builtin_ia32_vcvtss2si64: 4876 case X86::BI__builtin_ia32_vcvtss2usi32: 4877 case X86::BI__builtin_ia32_vcvtss2usi64: 4878 case X86::BI__builtin_ia32_vcvtsh2si32: 4879 case X86::BI__builtin_ia32_vcvtsh2si64: 4880 case X86::BI__builtin_ia32_vcvtsh2usi32: 4881 case X86::BI__builtin_ia32_vcvtsh2usi64: 4882 case X86::BI__builtin_ia32_sqrtpd512: 4883 case X86::BI__builtin_ia32_sqrtps512: 4884 case X86::BI__builtin_ia32_sqrtph512: 4885 ArgNum = 1; 4886 HasRC = true; 4887 break; 4888 case X86::BI__builtin_ia32_addph512: 4889 case X86::BI__builtin_ia32_divph512: 4890 case X86::BI__builtin_ia32_mulph512: 4891 case X86::BI__builtin_ia32_subph512: 4892 case X86::BI__builtin_ia32_addpd512: 4893 case X86::BI__builtin_ia32_addps512: 4894 case X86::BI__builtin_ia32_divpd512: 4895 case X86::BI__builtin_ia32_divps512: 4896 case X86::BI__builtin_ia32_mulpd512: 4897 case X86::BI__builtin_ia32_mulps512: 4898 case X86::BI__builtin_ia32_subpd512: 4899 case X86::BI__builtin_ia32_subps512: 4900 case X86::BI__builtin_ia32_cvtsi2sd64: 4901 case X86::BI__builtin_ia32_cvtsi2ss32: 4902 case X86::BI__builtin_ia32_cvtsi2ss64: 4903 case X86::BI__builtin_ia32_cvtusi2sd64: 4904 case X86::BI__builtin_ia32_cvtusi2ss32: 4905 case X86::BI__builtin_ia32_cvtusi2ss64: 4906 case X86::BI__builtin_ia32_vcvtusi2sh: 4907 case X86::BI__builtin_ia32_vcvtusi642sh: 4908 case X86::BI__builtin_ia32_vcvtsi2sh: 4909 case X86::BI__builtin_ia32_vcvtsi642sh: 4910 ArgNum = 2; 4911 HasRC = true; 4912 break; 4913 case X86::BI__builtin_ia32_cvtdq2ps512_mask: 4914 case X86::BI__builtin_ia32_cvtudq2ps512_mask: 4915 case X86::BI__builtin_ia32_vcvtpd2ph512_mask: 4916 case X86::BI__builtin_ia32_vcvtps2phx512_mask: 
4917 case X86::BI__builtin_ia32_cvtpd2ps512_mask: 4918 case X86::BI__builtin_ia32_cvtpd2dq512_mask: 4919 case X86::BI__builtin_ia32_cvtpd2qq512_mask: 4920 case X86::BI__builtin_ia32_cvtpd2udq512_mask: 4921 case X86::BI__builtin_ia32_cvtpd2uqq512_mask: 4922 case X86::BI__builtin_ia32_cvtps2dq512_mask: 4923 case X86::BI__builtin_ia32_cvtps2qq512_mask: 4924 case X86::BI__builtin_ia32_cvtps2udq512_mask: 4925 case X86::BI__builtin_ia32_cvtps2uqq512_mask: 4926 case X86::BI__builtin_ia32_cvtqq2pd512_mask: 4927 case X86::BI__builtin_ia32_cvtqq2ps512_mask: 4928 case X86::BI__builtin_ia32_cvtuqq2pd512_mask: 4929 case X86::BI__builtin_ia32_cvtuqq2ps512_mask: 4930 case X86::BI__builtin_ia32_vcvtdq2ph512_mask: 4931 case X86::BI__builtin_ia32_vcvtudq2ph512_mask: 4932 case X86::BI__builtin_ia32_vcvtw2ph512_mask: 4933 case X86::BI__builtin_ia32_vcvtuw2ph512_mask: 4934 case X86::BI__builtin_ia32_vcvtph2w512_mask: 4935 case X86::BI__builtin_ia32_vcvtph2uw512_mask: 4936 case X86::BI__builtin_ia32_vcvtph2dq512_mask: 4937 case X86::BI__builtin_ia32_vcvtph2udq512_mask: 4938 case X86::BI__builtin_ia32_vcvtph2qq512_mask: 4939 case X86::BI__builtin_ia32_vcvtph2uqq512_mask: 4940 case X86::BI__builtin_ia32_vcvtqq2ph512_mask: 4941 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask: 4942 ArgNum = 3; 4943 HasRC = true; 4944 break; 4945 case X86::BI__builtin_ia32_addsh_round_mask: 4946 case X86::BI__builtin_ia32_addss_round_mask: 4947 case X86::BI__builtin_ia32_addsd_round_mask: 4948 case X86::BI__builtin_ia32_divsh_round_mask: 4949 case X86::BI__builtin_ia32_divss_round_mask: 4950 case X86::BI__builtin_ia32_divsd_round_mask: 4951 case X86::BI__builtin_ia32_mulsh_round_mask: 4952 case X86::BI__builtin_ia32_mulss_round_mask: 4953 case X86::BI__builtin_ia32_mulsd_round_mask: 4954 case X86::BI__builtin_ia32_subsh_round_mask: 4955 case X86::BI__builtin_ia32_subss_round_mask: 4956 case X86::BI__builtin_ia32_subsd_round_mask: 4957 case X86::BI__builtin_ia32_scalefph512_mask: 4958 case 
X86::BI__builtin_ia32_scalefpd512_mask: 4959 case X86::BI__builtin_ia32_scalefps512_mask: 4960 case X86::BI__builtin_ia32_scalefsd_round_mask: 4961 case X86::BI__builtin_ia32_scalefss_round_mask: 4962 case X86::BI__builtin_ia32_scalefsh_round_mask: 4963 case X86::BI__builtin_ia32_cvtsd2ss_round_mask: 4964 case X86::BI__builtin_ia32_vcvtss2sh_round_mask: 4965 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask: 4966 case X86::BI__builtin_ia32_sqrtsd_round_mask: 4967 case X86::BI__builtin_ia32_sqrtss_round_mask: 4968 case X86::BI__builtin_ia32_sqrtsh_round_mask: 4969 case X86::BI__builtin_ia32_vfmaddsd3_mask: 4970 case X86::BI__builtin_ia32_vfmaddsd3_maskz: 4971 case X86::BI__builtin_ia32_vfmaddsd3_mask3: 4972 case X86::BI__builtin_ia32_vfmaddss3_mask: 4973 case X86::BI__builtin_ia32_vfmaddss3_maskz: 4974 case X86::BI__builtin_ia32_vfmaddss3_mask3: 4975 case X86::BI__builtin_ia32_vfmaddsh3_mask: 4976 case X86::BI__builtin_ia32_vfmaddsh3_maskz: 4977 case X86::BI__builtin_ia32_vfmaddsh3_mask3: 4978 case X86::BI__builtin_ia32_vfmaddpd512_mask: 4979 case X86::BI__builtin_ia32_vfmaddpd512_maskz: 4980 case X86::BI__builtin_ia32_vfmaddpd512_mask3: 4981 case X86::BI__builtin_ia32_vfmsubpd512_mask3: 4982 case X86::BI__builtin_ia32_vfmaddps512_mask: 4983 case X86::BI__builtin_ia32_vfmaddps512_maskz: 4984 case X86::BI__builtin_ia32_vfmaddps512_mask3: 4985 case X86::BI__builtin_ia32_vfmsubps512_mask3: 4986 case X86::BI__builtin_ia32_vfmaddph512_mask: 4987 case X86::BI__builtin_ia32_vfmaddph512_maskz: 4988 case X86::BI__builtin_ia32_vfmaddph512_mask3: 4989 case X86::BI__builtin_ia32_vfmsubph512_mask3: 4990 case X86::BI__builtin_ia32_vfmaddsubpd512_mask: 4991 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz: 4992 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3: 4993 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3: 4994 case X86::BI__builtin_ia32_vfmaddsubps512_mask: 4995 case X86::BI__builtin_ia32_vfmaddsubps512_maskz: 4996 case X86::BI__builtin_ia32_vfmaddsubps512_mask3: 4997 case 
X86::BI__builtin_ia32_vfmsubaddps512_mask3: 4998 case X86::BI__builtin_ia32_vfmaddsubph512_mask: 4999 case X86::BI__builtin_ia32_vfmaddsubph512_maskz: 5000 case X86::BI__builtin_ia32_vfmaddsubph512_mask3: 5001 case X86::BI__builtin_ia32_vfmsubaddph512_mask3: 5002 case X86::BI__builtin_ia32_vfmaddcsh_mask: 5003 case X86::BI__builtin_ia32_vfmaddcsh_round_mask: 5004 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: 5005 case X86::BI__builtin_ia32_vfmaddcph512_mask: 5006 case X86::BI__builtin_ia32_vfmaddcph512_maskz: 5007 case X86::BI__builtin_ia32_vfmaddcph512_mask3: 5008 case X86::BI__builtin_ia32_vfcmaddcsh_mask: 5009 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask: 5010 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3: 5011 case X86::BI__builtin_ia32_vfcmaddcph512_mask: 5012 case X86::BI__builtin_ia32_vfcmaddcph512_maskz: 5013 case X86::BI__builtin_ia32_vfcmaddcph512_mask3: 5014 case X86::BI__builtin_ia32_vfmulcsh_mask: 5015 case X86::BI__builtin_ia32_vfmulcph512_mask: 5016 case X86::BI__builtin_ia32_vfcmulcsh_mask: 5017 case X86::BI__builtin_ia32_vfcmulcph512_mask: 5018 ArgNum = 4; 5019 HasRC = true; 5020 break; 5021 } 5022 5023 llvm::APSInt Result; 5024 5025 // We can't check the value of a dependent argument. 5026 Expr *Arg = TheCall->getArg(ArgNum); 5027 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5028 return false; 5029 5030 // Check constant-ness first. 5031 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5032 return true; 5033 5034 // Make sure rounding mode is either ROUND_CUR_DIRECTION or ROUND_NO_EXC bit 5035 // is set. If the intrinsic has rounding control(bits 1:0), make sure its only 5036 // combined with ROUND_NO_EXC. If the intrinsic does not have rounding 5037 // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
  // Accept exactly: ROUND_CUR_DIRECTION (4), ROUND_NO_EXC (8), and then,
  // depending on whether the builtin carries rounding control, either the
  // combination of both (12, only without RC) or an explicit rounding mode
  // ORed with ROUND_NO_EXC (values 8..11, only with RC).
  if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
      Result == 8/*ROUND_NO_EXC*/ ||
      (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
      (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
    return false;

  // Any other immediate is rejected with a hard error.
  return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
         << Arg->getSourceRange();
}

// Check if the gather/scatter scale is legal.
bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                             CallExpr *TheCall) {
  unsigned ArgNum = 0;
  switch (BuiltinID) {
  default:
    // Builtin takes no scale immediate; nothing to check.
    return false;
  // Gather/scatter prefetch builtins: the scale is operand 3.
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    ArgNum = 3;
    break;
  // Gather/scatter load and store builtins: the scale is operand 4.
  case X86::BI__builtin_ia32_gatherd_pd:
  case X86::BI__builtin_ia32_gatherd_pd256:
  case X86::BI__builtin_ia32_gatherq_pd:
  case X86::BI__builtin_ia32_gatherq_pd256:
  case X86::BI__builtin_ia32_gatherd_ps:
  case X86::BI__builtin_ia32_gatherd_ps256:
  case X86::BI__builtin_ia32_gatherq_ps:
  case X86::BI__builtin_ia32_gatherq_ps256:
  case X86::BI__builtin_ia32_gatherd_q:
  case X86::BI__builtin_ia32_gatherd_q256:
  case X86::BI__builtin_ia32_gatherq_q:
  case X86::BI__builtin_ia32_gatherq_q256:
  case X86::BI__builtin_ia32_gatherd_d:
  case X86::BI__builtin_ia32_gatherd_d256:
  case X86::BI__builtin_ia32_gatherq_d:
  case X86::BI__builtin_ia32_gatherq_d256:
  case X86::BI__builtin_ia32_gather3div2df:
  case X86::BI__builtin_ia32_gather3div2di:
  case X86::BI__builtin_ia32_gather3div4df:
  case X86::BI__builtin_ia32_gather3div4di:
  case X86::BI__builtin_ia32_gather3div4sf:
  case X86::BI__builtin_ia32_gather3div4si:
  case X86::BI__builtin_ia32_gather3div8sf:
  case X86::BI__builtin_ia32_gather3div8si:
  case X86::BI__builtin_ia32_gather3siv2df:
  case X86::BI__builtin_ia32_gather3siv2di:
  case X86::BI__builtin_ia32_gather3siv4df:
  case X86::BI__builtin_ia32_gather3siv4di:
  case X86::BI__builtin_ia32_gather3siv4sf:
  case X86::BI__builtin_ia32_gather3siv4si:
  case X86::BI__builtin_ia32_gather3siv8sf:
  case X86::BI__builtin_ia32_gather3siv8si:
  case X86::BI__builtin_ia32_gathersiv8df:
  case X86::BI__builtin_ia32_gathersiv16sf:
  case X86::BI__builtin_ia32_gatherdiv8df:
  case X86::BI__builtin_ia32_gatherdiv16sf:
  case X86::BI__builtin_ia32_gathersiv8di:
  case X86::BI__builtin_ia32_gathersiv16si:
  case X86::BI__builtin_ia32_gatherdiv8di:
  case X86::BI__builtin_ia32_gatherdiv16si:
  case X86::BI__builtin_ia32_scatterdiv2df:
  case X86::BI__builtin_ia32_scatterdiv2di:
  case X86::BI__builtin_ia32_scatterdiv4df:
  case X86::BI__builtin_ia32_scatterdiv4di:
  case X86::BI__builtin_ia32_scatterdiv4sf:
  case X86::BI__builtin_ia32_scatterdiv4si:
  case X86::BI__builtin_ia32_scatterdiv8sf:
  case X86::BI__builtin_ia32_scatterdiv8si:
  case X86::BI__builtin_ia32_scattersiv2df:
  case X86::BI__builtin_ia32_scattersiv2di:
  case X86::BI__builtin_ia32_scattersiv4df:
  case X86::BI__builtin_ia32_scattersiv4di:
  case X86::BI__builtin_ia32_scattersiv4sf:
  case X86::BI__builtin_ia32_scattersiv4si:
  case X86::BI__builtin_ia32_scattersiv8sf:
  case X86::BI__builtin_ia32_scattersiv8si:
  case X86::BI__builtin_ia32_scattersiv8df:
  case X86::BI__builtin_ia32_scattersiv16sf:
  case X86::BI__builtin_ia32_scatterdiv8df:
  case X86::BI__builtin_ia32_scatterdiv16sf:
  case X86::BI__builtin_ia32_scattersiv8di:
  case X86::BI__builtin_ia32_scattersiv16si:
  case X86::BI__builtin_ia32_scatterdiv8di:
  case X86::BI__builtin_ia32_scatterdiv16si:
    ArgNum = 4;
break; 5131 } 5132 5133 llvm::APSInt Result; 5134 5135 // We can't check the value of a dependent argument. 5136 Expr *Arg = TheCall->getArg(ArgNum); 5137 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5138 return false; 5139 5140 // Check constant-ness first. 5141 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5142 return true; 5143 5144 if (Result == 1 || Result == 2 || Result == 4 || Result == 8) 5145 return false; 5146 5147 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale) 5148 << Arg->getSourceRange(); 5149} 5150 5151enum { TileRegLow = 0, TileRegHigh = 7 }; 5152 5153bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, 5154 ArrayRef<int> ArgNums) { 5155 for (int ArgNum : ArgNums) { 5156 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh)) 5157 return true; 5158 } 5159 return false; 5160} 5161 5162bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall, 5163 ArrayRef<int> ArgNums) { 5164 // Because the max number of tile register is TileRegHigh + 1, so here we use 5165 // each bit to represent the usage of them in bitset. 
5166 std::bitset<TileRegHigh + 1> ArgValues; 5167 for (int ArgNum : ArgNums) { 5168 Expr *Arg = TheCall->getArg(ArgNum); 5169 if (Arg->isTypeDependent() || Arg->isValueDependent()) 5170 continue; 5171 5172 llvm::APSInt Result; 5173 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) 5174 return true; 5175 int ArgExtValue = Result.getExtValue(); 5176 assert((ArgExtValue >= TileRegLow || ArgExtValue <= TileRegHigh) && 5177 "Incorrect tile register num."); 5178 if (ArgValues.test(ArgExtValue)) 5179 return Diag(TheCall->getBeginLoc(), 5180 diag::err_x86_builtin_tile_arg_duplicate) 5181 << TheCall->getArg(ArgNum)->getSourceRange(); 5182 ArgValues.set(ArgExtValue); 5183 } 5184 return false; 5185} 5186 5187bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, 5188 ArrayRef<int> ArgNums) { 5189 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) || 5190 CheckX86BuiltinTileDuplicate(TheCall, ArgNums); 5191} 5192 5193bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) { 5194 switch (BuiltinID) { 5195 default: 5196 return false; 5197 case X86::BI__builtin_ia32_tileloadd64: 5198 case X86::BI__builtin_ia32_tileloaddt164: 5199 case X86::BI__builtin_ia32_tilestored64: 5200 case X86::BI__builtin_ia32_tilezero: 5201 return CheckX86BuiltinTileArgumentsRange(TheCall, 0); 5202 case X86::BI__builtin_ia32_tdpbssd: 5203 case X86::BI__builtin_ia32_tdpbsud: 5204 case X86::BI__builtin_ia32_tdpbusd: 5205 case X86::BI__builtin_ia32_tdpbuud: 5206 case X86::BI__builtin_ia32_tdpbf16ps: 5207 case X86::BI__builtin_ia32_tdpfp16ps: 5208 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2}); 5209 } 5210} 5211static bool isX86_32Builtin(unsigned BuiltinID) { 5212 // These builtins only work on x86-32 targets. 
  switch (BuiltinID) {
  case X86::BI__builtin_ia32_readeflags_u32:
  case X86::BI__builtin_ia32_writeeflags_u32:
    return true;
  }

  return false;
}

/// Perform all target-specific semantic checks for an X86 builtin call:
/// CPU-feature builtins, 32-bit-only builtins, rounding/SAE immediates,
/// gather/scatter scales, AMX tile operands, and immediate range checks.
bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
                                       CallExpr *TheCall) {
  if (BuiltinID == X86::BI__builtin_cpu_supports)
    return SemaBuiltinCpuSupports(*this, TI, TheCall);

  if (BuiltinID == X86::BI__builtin_cpu_is)
    return SemaBuiltinCpuIs(*this, TI, TheCall);

  // Check for 32-bit only builtins on a 64-bit target.
  const llvm::Triple &TT = TI.getTriple();
  if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
    return Diag(TheCall->getCallee()->getBeginLoc(),
                diag::err_32_bit_builtin_64_bit_tgt);

  // If the intrinsic has rounding or SAE make sure it's valid.
  if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
    return true;

  // If the intrinsic has a gather/scatter scale immediate make sure it's
  // valid.
  if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
    return true;

  // If the intrinsic has tile arguments, make sure they are valid.
  if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
    return true;

  // For intrinsics which take an immediate value as part of the instruction,
  // range check them here.
  // i = operand index of the immediate, [l, u] = its allowed inclusive range.
  int i = 0, l = 0, u = 0;
  switch (BuiltinID) {
  default:
    return false;
  case X86::BI__builtin_ia32_vec_ext_v2si:
  case X86::BI__builtin_ia32_vec_ext_v2di:
  case X86::BI__builtin_ia32_vextractf128_pd256:
  case X86::BI__builtin_ia32_vextractf128_ps256:
  case X86::BI__builtin_ia32_vextractf128_si256:
  case X86::BI__builtin_ia32_extract128i256:
  case X86::BI__builtin_ia32_extractf64x4_mask:
  case X86::BI__builtin_ia32_extracti64x4_mask:
  case X86::BI__builtin_ia32_extractf32x8_mask:
  case X86::BI__builtin_ia32_extracti32x8_mask:
  case X86::BI__builtin_ia32_extractf64x2_256_mask:
  case X86::BI__builtin_ia32_extracti64x2_256_mask:
  case X86::BI__builtin_ia32_extractf32x4_256_mask:
  case X86::BI__builtin_ia32_extracti32x4_256_mask:
    i = 1; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vec_set_v2di:
  case X86::BI__builtin_ia32_vinsertf128_pd256:
  case X86::BI__builtin_ia32_vinsertf128_ps256:
  case X86::BI__builtin_ia32_vinsertf128_si256:
  case X86::BI__builtin_ia32_insert128i256:
  case X86::BI__builtin_ia32_insertf32x8:
  case X86::BI__builtin_ia32_inserti32x8:
  case X86::BI__builtin_ia32_insertf64x4:
  case X86::BI__builtin_ia32_inserti64x4:
  case X86::BI__builtin_ia32_insertf64x2_256:
  case X86::BI__builtin_ia32_inserti64x2_256:
  case X86::BI__builtin_ia32_insertf32x4_256:
  case X86::BI__builtin_ia32_inserti32x4_256:
    i = 2; l = 0; u = 1;
    break;
  case X86::BI__builtin_ia32_vpermilpd:
  case X86::BI__builtin_ia32_vec_ext_v4hi:
  case X86::BI__builtin_ia32_vec_ext_v4si:
  case X86::BI__builtin_ia32_vec_ext_v4sf:
  case X86::BI__builtin_ia32_vec_ext_v4di:
  case X86::BI__builtin_ia32_extractf32x4_mask:
  case X86::BI__builtin_ia32_extracti32x4_mask:
  case X86::BI__builtin_ia32_extractf64x2_512_mask:
  case X86::BI__builtin_ia32_extracti64x2_512_mask:
    i = 1; l = 0; u = 3;
    break;
  case X86::BI_mm_prefetch:
  case X86::BI__builtin_ia32_vec_ext_v8hi:
  case X86::BI__builtin_ia32_vec_ext_v8si:
    i = 1; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_sha1rnds4:
  case X86::BI__builtin_ia32_blendpd:
  case X86::BI__builtin_ia32_shufpd:
  case X86::BI__builtin_ia32_vec_set_v4hi:
  case X86::BI__builtin_ia32_vec_set_v4si:
  case X86::BI__builtin_ia32_vec_set_v4di:
  case X86::BI__builtin_ia32_shuf_f32x4_256:
  case X86::BI__builtin_ia32_shuf_f64x2_256:
  case X86::BI__builtin_ia32_shuf_i32x4_256:
  case X86::BI__builtin_ia32_shuf_i64x2_256:
  case X86::BI__builtin_ia32_insertf64x2_512:
  case X86::BI__builtin_ia32_inserti64x2_512:
  case X86::BI__builtin_ia32_insertf32x4:
  case X86::BI__builtin_ia32_inserti32x4:
    i = 2; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_vpermil2pd:
  case X86::BI__builtin_ia32_vpermil2pd256:
  case X86::BI__builtin_ia32_vpermil2ps:
  case X86::BI__builtin_ia32_vpermil2ps256:
    i = 3; l = 0; u = 3;
    break;
  case X86::BI__builtin_ia32_cmpb128_mask:
  case X86::BI__builtin_ia32_cmpw128_mask:
  case X86::BI__builtin_ia32_cmpd128_mask:
  case X86::BI__builtin_ia32_cmpq128_mask:
  case X86::BI__builtin_ia32_cmpb256_mask:
  case X86::BI__builtin_ia32_cmpw256_mask:
  case X86::BI__builtin_ia32_cmpd256_mask:
  case X86::BI__builtin_ia32_cmpq256_mask:
  case X86::BI__builtin_ia32_cmpb512_mask:
  case X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask:
  case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask:
  case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask:
  case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask:
  case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask:
  case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask:
  case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask:
  case X86::BI__builtin_ia32_ucmpq512_mask:
  case X86::BI__builtin_ia32_vpcomub:
  case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud:
  case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb:
  case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd:
  case X86::BI__builtin_ia32_vpcomq:
  case X86::BI__builtin_ia32_vec_set_v8hi:
  case X86::BI__builtin_ia32_vec_set_v8si:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_vpermilpd256:
  case X86::BI__builtin_ia32_roundps:
  case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256:
  case X86::BI__builtin_ia32_roundpd256:
  case X86::BI__builtin_ia32_getmantpd128_mask:
  case X86::BI__builtin_ia32_getmantpd256_mask:
  case X86::BI__builtin_ia32_getmantps128_mask:
  case X86::BI__builtin_ia32_getmantps256_mask:
  case X86::BI__builtin_ia32_getmantpd512_mask:
  case X86::BI__builtin_ia32_getmantps512_mask:
  case X86::BI__builtin_ia32_getmantph128_mask:
  case X86::BI__builtin_ia32_getmantph256_mask:
  case X86::BI__builtin_ia32_getmantph512_mask:
  case X86::BI__builtin_ia32_vec_ext_v16qi:
  case X86::BI__builtin_ia32_vec_ext_v16hi:
    i = 1; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_pblendd128:
  case X86::BI__builtin_ia32_blendps:
  case X86::BI__builtin_ia32_blendpd256:
  case X86::BI__builtin_ia32_shufpd256:
  case X86::BI__builtin_ia32_roundss:
  case X86::BI__builtin_ia32_roundsd:
  case X86::BI__builtin_ia32_rangepd128_mask:
  case X86::BI__builtin_ia32_rangepd256_mask:
  case X86::BI__builtin_ia32_rangepd512_mask:
  case X86::BI__builtin_ia32_rangeps128_mask:
  case X86::BI__builtin_ia32_rangeps256_mask:
  case X86::BI__builtin_ia32_rangeps512_mask:
  case X86::BI__builtin_ia32_getmantsd_round_mask:
  case X86::BI__builtin_ia32_getmantss_round_mask:
  case X86::BI__builtin_ia32_getmantsh_round_mask:
  case X86::BI__builtin_ia32_vec_set_v16qi:
  case X86::BI__builtin_ia32_vec_set_v16hi:
    i = 2; l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_vec_ext_v32qi:
    i = 1; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_cmpps:
  case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd:
  case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256:
  case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps128_mask:
  case X86::BI__builtin_ia32_cmppd128_mask:
  case X86::BI__builtin_ia32_cmpps256_mask:
  case X86::BI__builtin_ia32_cmppd256_mask:
  case X86::BI__builtin_ia32_cmpps512_mask:
  case X86::BI__builtin_ia32_cmppd512_mask:
  case X86::BI__builtin_ia32_cmpsd_mask:
  case X86::BI__builtin_ia32_cmpss_mask:
  case X86::BI__builtin_ia32_vec_set_v32qi:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_permdf256:
  case X86::BI__builtin_ia32_permdi256:
  case X86::BI__builtin_ia32_permdf512:
  case X86::BI__builtin_ia32_permdi512:
  case X86::BI__builtin_ia32_vpermilps:
  case X86::BI__builtin_ia32_vpermilps256:
  case X86::BI__builtin_ia32_vpermilpd512:
  case X86::BI__builtin_ia32_vpermilps512:
  case X86::BI__builtin_ia32_pshufd:
  case X86::BI__builtin_ia32_pshufd256:
  case X86::BI__builtin_ia32_pshufd512:
  case X86::BI__builtin_ia32_pshufhw:
  case X86::BI__builtin_ia32_pshufhw256:
  case X86::BI__builtin_ia32_pshufhw512:
  case X86::BI__builtin_ia32_pshuflw:
  case X86::BI__builtin_ia32_pshuflw256:
  case X86::BI__builtin_ia32_pshuflw512:
  case X86::BI__builtin_ia32_vcvtps2ph:
  case X86::BI__builtin_ia32_vcvtps2ph_mask:
  case X86::BI__builtin_ia32_vcvtps2ph256:
  case X86::BI__builtin_ia32_vcvtps2ph256_mask:
  case X86::BI__builtin_ia32_vcvtps2ph512_mask:
  case X86::BI__builtin_ia32_rndscaleps_128_mask:
  case X86::BI__builtin_ia32_rndscalepd_128_mask:
  case X86::BI__builtin_ia32_rndscaleps_256_mask:
  case X86::BI__builtin_ia32_rndscalepd_256_mask:
  case X86::BI__builtin_ia32_rndscaleps_mask:
  case X86::BI__builtin_ia32_rndscalepd_mask:
  case X86::BI__builtin_ia32_rndscaleph_mask:
  case X86::BI__builtin_ia32_reducepd128_mask:
  case X86::BI__builtin_ia32_reducepd256_mask:
  case X86::BI__builtin_ia32_reducepd512_mask:
  case X86::BI__builtin_ia32_reduceps128_mask:
  case X86::BI__builtin_ia32_reduceps256_mask:
  case X86::BI__builtin_ia32_reduceps512_mask:
  case X86::BI__builtin_ia32_reduceph128_mask:
  case X86::BI__builtin_ia32_reduceph256_mask:
  case X86::BI__builtin_ia32_reduceph512_mask:
  case X86::BI__builtin_ia32_prold512:
  case X86::BI__builtin_ia32_prolq512:
  case X86::BI__builtin_ia32_prold128:
  case X86::BI__builtin_ia32_prold256:
  case X86::BI__builtin_ia32_prolq128:
  case X86::BI__builtin_ia32_prolq256:
  case X86::BI__builtin_ia32_prord512:
  case X86::BI__builtin_ia32_prorq512:
  case X86::BI__builtin_ia32_prord128:
  case X86::BI__builtin_ia32_prord256:
  case X86::BI__builtin_ia32_prorq128:
  case X86::BI__builtin_ia32_prorq256:
  case X86::BI__builtin_ia32_fpclasspd128_mask:
  case X86::BI__builtin_ia32_fpclasspd256_mask:
  case X86::BI__builtin_ia32_fpclassps128_mask:
  case X86::BI__builtin_ia32_fpclassps256_mask:
  case X86::BI__builtin_ia32_fpclassps512_mask:
  case X86::BI__builtin_ia32_fpclasspd512_mask:
  case X86::BI__builtin_ia32_fpclassph128_mask:
  case X86::BI__builtin_ia32_fpclassph256_mask:
  case X86::BI__builtin_ia32_fpclassph512_mask:
  case X86::BI__builtin_ia32_fpclasssd_mask:
  case X86::BI__builtin_ia32_fpclassss_mask:
  case X86::BI__builtin_ia32_fpclasssh_mask:
  case X86::BI__builtin_ia32_pslldqi128_byteshift:
  case X86::BI__builtin_ia32_pslldqi256_byteshift:
  case X86::BI__builtin_ia32_pslldqi512_byteshift:
  case X86::BI__builtin_ia32_psrldqi128_byteshift:
  case X86::BI__builtin_ia32_psrldqi256_byteshift:
  case X86::BI__builtin_ia32_psrldqi512_byteshift:
  case X86::BI__builtin_ia32_kshiftliqi:
  case X86::BI__builtin_ia32_kshiftlihi:
  case X86::BI__builtin_ia32_kshiftlisi:
  case X86::BI__builtin_ia32_kshiftlidi:
  case X86::BI__builtin_ia32_kshiftriqi:
  case X86::BI__builtin_ia32_kshiftrihi:
  case X86::BI__builtin_ia32_kshiftrisi:
  case X86::BI__builtin_ia32_kshiftridi:
    i = 1; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_vperm2f128_pd256:
  case X86::BI__builtin_ia32_vperm2f128_ps256:
  case X86::BI__builtin_ia32_vperm2f128_si256:
  case X86::BI__builtin_ia32_permti256:
  case X86::BI__builtin_ia32_pblendw128:
  case X86::BI__builtin_ia32_pblendw256:
  case X86::BI__builtin_ia32_blendps256:
  case X86::BI__builtin_ia32_pblendd256:
  case X86::BI__builtin_ia32_palignr128:
  case X86::BI__builtin_ia32_palignr256:
  case X86::BI__builtin_ia32_palignr512:
  case X86::BI__builtin_ia32_alignq512:
  case X86::BI__builtin_ia32_alignd512:
  case X86::BI__builtin_ia32_alignd128:
  case X86::BI__builtin_ia32_alignd256:
  case X86::BI__builtin_ia32_alignq128:
  case X86::BI__builtin_ia32_alignq256:
  case X86::BI__builtin_ia32_vcomisd:
  case X86::BI__builtin_ia32_vcomiss:
  case X86::BI__builtin_ia32_shuf_f32x4:
  case X86::BI__builtin_ia32_shuf_f64x2:
  case X86::BI__builtin_ia32_shuf_i32x4:
  case X86::BI__builtin_ia32_shuf_i64x2:
  case X86::BI__builtin_ia32_shufpd512:
  case X86::BI__builtin_ia32_shufps:
  case X86::BI__builtin_ia32_shufps256:
  case X86::BI__builtin_ia32_shufps512:
  case X86::BI__builtin_ia32_dbpsadbw128:
  case X86::BI__builtin_ia32_dbpsadbw256:
  case X86::BI__builtin_ia32_dbpsadbw512:
  case X86::BI__builtin_ia32_vpshldd128:
  case X86::BI__builtin_ia32_vpshldd256:
  case X86::BI__builtin_ia32_vpshldd512:
  case X86::BI__builtin_ia32_vpshldq128:
  case X86::BI__builtin_ia32_vpshldq256:
  case X86::BI__builtin_ia32_vpshldq512:
  case X86::BI__builtin_ia32_vpshldw128:
  case X86::BI__builtin_ia32_vpshldw256:
  case X86::BI__builtin_ia32_vpshldw512:
  case X86::BI__builtin_ia32_vpshrdd128:
  case X86::BI__builtin_ia32_vpshrdd256:
  case X86::BI__builtin_ia32_vpshrdd512:
  case X86::BI__builtin_ia32_vpshrdq128:
  case X86::BI__builtin_ia32_vpshrdq256:
  case X86::BI__builtin_ia32_vpshrdq512:
  case X86::BI__builtin_ia32_vpshrdw128:
  case X86::BI__builtin_ia32_vpshrdw256:
  case X86::BI__builtin_ia32_vpshrdw512:
    i = 2; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_fixupimmpd512_mask:
  case X86::BI__builtin_ia32_fixupimmpd512_maskz:
  case X86::BI__builtin_ia32_fixupimmps512_mask:
  case X86::BI__builtin_ia32_fixupimmps512_maskz:
  case X86::BI__builtin_ia32_fixupimmsd_mask:
  case X86::BI__builtin_ia32_fixupimmsd_maskz:
  case X86::BI__builtin_ia32_fixupimmss_mask:
  case X86::BI__builtin_ia32_fixupimmss_maskz:
  case X86::BI__builtin_ia32_fixupimmpd128_mask:
  case X86::BI__builtin_ia32_fixupimmpd128_maskz:
  case X86::BI__builtin_ia32_fixupimmpd256_mask:
  case X86::BI__builtin_ia32_fixupimmpd256_maskz:
  case X86::BI__builtin_ia32_fixupimmps128_mask:
  case X86::BI__builtin_ia32_fixupimmps128_maskz:
  case X86::BI__builtin_ia32_fixupimmps256_mask:
  case X86::BI__builtin_ia32_fixupimmps256_maskz:
  case X86::BI__builtin_ia32_pternlogd512_mask:
  case X86::BI__builtin_ia32_pternlogd512_maskz:
  case X86::BI__builtin_ia32_pternlogq512_mask:
  case X86::BI__builtin_ia32_pternlogq512_maskz:
  case X86::BI__builtin_ia32_pternlogd128_mask:
  case X86::BI__builtin_ia32_pternlogd128_maskz:
  case X86::BI__builtin_ia32_pternlogd256_mask:
  case X86::BI__builtin_ia32_pternlogd256_maskz:
  case X86::BI__builtin_ia32_pternlogq128_mask:
  case X86::BI__builtin_ia32_pternlogq128_maskz:
  case X86::BI__builtin_ia32_pternlogq256_mask:
  case X86::BI__builtin_ia32_pternlogq256_maskz:
    i = 3; l = 0; u = 255;
    break;
  // Prefetch hint operand: only _MM_HINT_T0/_MM_HINT_T1 encodings (2..3).
  case X86::BI__builtin_ia32_gatherpfdpd:
  case X86::BI__builtin_ia32_gatherpfdps:
  case X86::BI__builtin_ia32_gatherpfqpd:
  case X86::BI__builtin_ia32_gatherpfqps:
  case X86::BI__builtin_ia32_scatterpfdpd:
  case X86::BI__builtin_ia32_scatterpfdps:
  case X86::BI__builtin_ia32_scatterpfqpd:
  case X86::BI__builtin_ia32_scatterpfqps:
    i = 4; l = 2; u = 3;
    break;
  case X86::BI__builtin_ia32_reducesd_mask:
  case X86::BI__builtin_ia32_reducess_mask:
  case X86::BI__builtin_ia32_rndscalesd_round_mask:
  case X86::BI__builtin_ia32_rndscaless_round_mask:
  case X86::BI__builtin_ia32_rndscalesh_round_mask:
  case X86::BI__builtin_ia32_reducesh_mask:
    i = 4; l = 0; u = 255;
    break;
  case X86::BI__builtin_ia32_cmpccxadd32:
  case X86::BI__builtin_ia32_cmpccxadd64:
    i = 3; l = 0; u = 15;
    break;
  }

  // Note that we don't force a hard error on the range check here, allowing
  // template-generated or macro-generated dead code to potentially have out-of-
  // range values. These need to code generate, but don't need to necessarily
  // make any sense. We use a warning that defaults to an error.
  return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
}

/// Given a FunctionDecl's FormatAttr, attempts to populate the FormatStringInfo
/// parameter with the FormatAttr's correct format_idx and firstDataArg.
/// Returns true when the format fits the function and the FormatStringInfo has
/// been populated.
5609bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, 5610 bool IsVariadic, FormatStringInfo *FSI) { 5611 if (Format->getFirstArg() == 0) 5612 FSI->ArgPassingKind = FAPK_VAList; 5613 else if (IsVariadic) 5614 FSI->ArgPassingKind = FAPK_Variadic; 5615 else 5616 FSI->ArgPassingKind = FAPK_Fixed; 5617 FSI->FormatIdx = Format->getFormatIdx() - 1; 5618 FSI->FirstDataArg = 5619 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1; 5620 5621 // The way the format attribute works in GCC, the implicit this argument 5622 // of member functions is counted. However, it doesn't appear in our own 5623 // lists, so decrement format_idx in that case. 5624 if (IsCXXMember) { 5625 if(FSI->FormatIdx == 0) 5626 return false; 5627 --FSI->FormatIdx; 5628 if (FSI->FirstDataArg != 0) 5629 --FSI->FirstDataArg; 5630 } 5631 return true; 5632} 5633 5634/// Checks if a the given expression evaluates to null. 5635/// 5636/// Returns true if the value evaluates to null. 5637static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { 5638 // If the expression has non-null type, it doesn't evaluate to null. 5639 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) { 5640 if (*nullability == NullabilityKind::NonNull) 5641 return false; 5642 } 5643 5644 // As a special case, transparent unions initialized with zero are 5645 // considered null for the purposes of the nonnull attribute. 
5646 if (const RecordType *UT = Expr->getType()->getAsUnionType()) { 5647 if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) 5648 if (const CompoundLiteralExpr *CLE = 5649 dyn_cast<CompoundLiteralExpr>(Expr)) 5650 if (const InitListExpr *ILE = 5651 dyn_cast<InitListExpr>(CLE->getInitializer())) 5652 Expr = ILE->getInit(0); 5653 } 5654 5655 bool Result; 5656 return (!Expr->isValueDependent() && 5657 Expr->EvaluateAsBooleanCondition(Result, S.Context) && 5658 !Result); 5659} 5660 5661static void CheckNonNullArgument(Sema &S, 5662 const Expr *ArgExpr, 5663 SourceLocation CallSiteLoc) { 5664 if (CheckNonNullExpr(S, ArgExpr)) 5665 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, 5666 S.PDiag(diag::warn_null_arg) 5667 << ArgExpr->getSourceRange()); 5668} 5669 5670bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { 5671 FormatStringInfo FSI; 5672 if ((GetFormatStringType(Format) == FST_NSString) && 5673 getFormatStringInfo(Format, false, true, &FSI)) { 5674 Idx = FSI.FormatIdx; 5675 return true; 5676 } 5677 return false; 5678} 5679 5680/// Diagnose use of %s directive in an NSString which is being passed 5681/// as formatting string to formatting method. 
static void
DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
                                      const NamedDecl *FDecl,
                                      Expr **Args,
                                      unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  // CFString-family functions take the format string as their third argument;
  // otherwise look for a format attribute naming an NSString format index.
  ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
    Idx = 2;
    Format = true;
  }
  else
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  if (!Format || NumArgs <= Idx)
    return;
  // Look through a C-style cast and paren/implicit casts to find the literal.
  const Expr *FormatExpr = Args[Idx];
  if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
    FormatExpr = CSCE->getSubExpr();
  const StringLiteral *FormatString;
  if (const ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
    FormatString = OSL->getString();
  else
    FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
  if (!FormatString)
    return;
  // %s in an NSString/CFString format is the actual problem being diagnosed.
  if (S.FormatStringHasSArg(FormatString)) {
    S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
      << "%s" << 1 << 1;
    S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
      << FDecl->getDeclName();
  }
}

/// Determine whether the given type has a non-null nullability annotation.
static bool isNonNullType(QualType type) {
  if (auto nullability = type->getNullability())
    return *nullability == NullabilityKind::NonNull;

  return false;
}

/// Collect every argument position that must be non-null (from nonnull
/// attributes on the declaration, on individual parameters, and from _Nonnull
/// parameter types) and diagnose any argument that provably evaluates to null.
static void CheckNonNullArguments(Sema &S,
                                  const NamedDecl *FDecl,
                                  const FunctionProtoType *Proto,
                                  ArrayRef<const Expr *> Args,
                                  SourceLocation CallSiteLoc) {
  assert((FDecl || Proto) && "Need a function declaration or prototype");

  // Already checked by constant evaluator.
  if (S.isConstantEvaluated())
    return;
  // Check the attributes attached to the method/function itself.
  // NonNullArgs is sized lazily, only once some position is known non-null.
  llvm::SmallBitVector NonNullArgs;
  if (FDecl) {
    // Handle the nonnull attribute on the function/method declaration itself.
    for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
      if (!NonNull->args_size()) {
        // Easy case: all pointer arguments are nonnull.
        for (const auto *Arg : Args)
          if (S.isValidPointerAttrType(Arg->getType()))
            CheckNonNullArgument(S, Arg, CallSiteLoc);
        return;
      }

      for (const ParamIdx &Idx : NonNull->args()) {
        unsigned IdxAST = Idx.getASTIndex();
        if (IdxAST >= Args.size())
          continue;
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());
        NonNullArgs.set(IdxAST);
      }
    }
  }

  if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
    // Handle the nonnull attribute on the parameters of the
    // function/method.
    ArrayRef<ParmVarDecl*> parms;
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
      parms = FD->parameters();
    else
      parms = cast<ObjCMethodDecl>(FDecl)->parameters();

    unsigned ParamIndex = 0;
    for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
         I != E; ++I, ++ParamIndex) {
      const ParmVarDecl *PVD = *I;
      if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
        if (NonNullArgs.empty())
          NonNullArgs.resize(Args.size());

        NonNullArgs.set(ParamIndex);
      }
    }
  } else {
    // If we have a non-function, non-method declaration but no
    // function prototype, try to dig out the function prototype.
    if (!Proto) {
      if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
        QualType type = VD->getType().getNonReferenceType();
        if (auto pointerType = type->getAs<PointerType>())
          type = pointerType->getPointeeType();
        else if (auto blockType = type->getAs<BlockPointerType>())
          type = blockType->getPointeeType();
        // FIXME: data member pointers?

        // Dig out the function prototype, if there is one.
        Proto = type->getAs<FunctionProtoType>();
      }
    }

    // Fill in non-null argument information from the nullability
    // information on the parameter types (if we have them).
    if (Proto) {
      unsigned Index = 0;
      for (auto paramType : Proto->getParamTypes()) {
        if (isNonNullType(paramType)) {
          if (NonNullArgs.empty())
            NonNullArgs.resize(Args.size());

          NonNullArgs.set(Index);
        }

        ++Index;
      }
    }
  }

  // Check for non-null arguments.
  for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
       ArgIndex != ArgIndexEnd; ++ArgIndex) {
    if (NonNullArgs[ArgIndex])
      CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
  }
}

// 16 byte ByVal alignment not due to a vector member is not honoured by XL
// on AIX. Emit a warning here that users are generating binary incompatible
// code to be safe.
// Here we try to get information about the alignment of the struct member
// from the struct passed to the caller function. We only warn when the struct
// is passed byval, hence the series of checks and early returns if we are
// not passing a struct byval.
void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
  // We only care about a struct parameter passed byval: pattern-match an
  // implicit cast of a DeclRefExpr that names a record-typed ParmVarDecl.
  const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
  if (!ICE)
    return;

  const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
  if (!DR)
    return;

  const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
  if (!PD || !PD->getType()->isRecordType())
    return;

  QualType ArgType = Arg->getType();
  for (const FieldDecl *FD :
       ArgType->castAs<RecordType>()->getDecl()->fields()) {
    if (const auto *AA = FD->getAttr<AlignedAttr>()) {
      CharUnits Alignment =
          Context.toCharUnitsFromBits(AA->getAlignment(Context));
      // Only an explicit 16-byte member alignment triggers the
      // XL-incompatibility warning (see the comment above this function).
      if (Alignment.getQuantity() == 16) {
        Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
        Diag(Loc, diag::note_misaligned_member_used_here) << PD;
      }
    }
  }
}

/// Warn if a pointer or reference argument passed to a function points to an
/// object that is less aligned than the parameter. This can happen when
/// creating a typedef with a lower alignment than the original type and then
/// calling functions defined in terms of the original type.
void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
                             StringRef ParamName, QualType ArgTy,
                             QualType ParamTy) {

  // If a function accepts a pointer or reference type
  if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
    return;

  // If the parameter is a pointer type, get the pointee type for the
  // argument too. If the parameter is a reference type, don't try to get
  // the pointee type for the argument.
  if (ParamTy->isPointerType())
    ArgTy = ArgTy->getPointeeType();

  // Remove reference or pointer
  ParamTy = ParamTy->getPointeeType();

  // Find expected alignment, and the actual alignment of the passed object.
  // getTypeAlignInChars requires complete types
  if (ArgTy.isNull() || ParamTy->isDependentType() ||
      ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
      ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
    return;

  CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
  CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);

  // If the argument is less aligned than the parameter, there is a
  // potential alignment issue.
  if (ArgAlign < ParamAlign)
    Diag(Loc, diag::warn_param_mismatched_alignment)
        << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
        << ParamName << (FDecl != nullptr) << FDecl;
}

/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
                     const Expr *ThisArg, ArrayRef<const Expr *> Args,
                     bool IsMemberFunction, SourceLocation Loc,
                     SourceRange Range, VariadicCallType CallType) {
  // FIXME: We should check as much as we can in the template definition.
  if (CurContext->isDependentContext())
    return;

  // Printf and scanf checking.
  llvm::SmallBitVector CheckedVarArgs;
  if (FDecl) {
    for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
      // Only create vector if there are format attributes.
      CheckedVarArgs.resize(Args.size());

      CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
                           CheckedVarArgs);
    }
  }

  // Refuse POD arguments that weren't caught by the format string
  // checks above.
  auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
  if (CallType != VariadicDoesNotApply &&
      (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
    // Determine how many named parameters there are, so we know where the
    // variadic tail begins. Prefer the prototype; fall back to the decl.
    unsigned NumParams = Proto ? Proto->getNumParams()
                         : FDecl && isa<FunctionDecl>(FDecl)
                             ? cast<FunctionDecl>(FDecl)->getNumParams()
                         : FDecl && isa<ObjCMethodDecl>(FDecl)
                             ? cast<ObjCMethodDecl>(FDecl)->param_size()
                         : 0;

    for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        // Skip arguments already consumed (and checked) by a format string.
        if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
          checkVariadicArgument(Arg, CallType);
      }
    }
  }

  if (FDecl || Proto) {
    CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);

    // Type safety checking.
    if (FDecl) {
      for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
        CheckArgumentWithTypeTag(I, Args, Loc);
    }
  }

  // Check that passed arguments match the alignment of original arguments.
  // Try to get the missing prototype from the declaration.
  if (!Proto && FDecl) {
    const auto *FT = FDecl->getFunctionType();
    if (isa_and_nonnull<FunctionProtoType>(FT))
      Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
  }
  if (Proto) {
    // For variadic functions, we may have more args than parameters.
    // For some K&R functions, we may have less args than parameters.
    const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
    for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
      // Args[ArgIdx] can be null in malformed code.
      if (const Expr *Arg = Args[ArgIdx]) {
        if (Arg->containsErrors())
          continue;

        // AIX byval-alignment check only applies to non-variadic calls to
        // externally-visible functions (see checkAIXMemberAlignment above).
        if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
            FDecl->hasLinkage() &&
            FDecl->getFormalLinkage() != InternalLinkage &&
            CallType == VariadicDoesNotApply)
          checkAIXMemberAlignment((Arg->getExprLoc()), Arg);

        QualType ParamTy = Proto->getParamType(ArgIdx);
        QualType ArgTy = Arg->getType();
        // Parameter name in the diagnostic is its 1-based position.
        CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
                          ArgTy, ParamTy);
      }
    }
  }

  if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
    // Validate the constant alignment argument of an alloc_align function:
    // it must be a power of two and within the supported maximum.
    auto *AA = FDecl->getAttr<AllocAlignAttr>();
    const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
    if (!Arg->isValueDependent()) {
      Expr::EvalResult Align;
      if (Arg->EvaluateAsInt(Align, Context)) {
        const llvm::APSInt &I = Align.Val.getInt();
        if (!I.isPowerOf2())
          Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
              << Arg->getSourceRange();

        if (I > Sema::MaximumAlignment)
          Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
              << Arg->getSourceRange() << Sema::MaximumAlignment;
      }
    }
  }

  if (FD)
    diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
}

/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
                                ArrayRef<const Expr *> Args,
                                const FunctionProtoType *Proto,
                                SourceLocation Loc) {
  VariadicCallType CallType =
      Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;

  // Check the alignment of the constructed object against the declared
  // 'this' object type.
  auto *Ctor = cast<CXXConstructorDecl>(FDecl);
  CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
                    Context.getPointerType(Ctor->getThisObjectType()));

  checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
            Loc, SourceRange(), CallType);
}

/// CheckFunctionCall - Check a direct function call for various correctness
/// and safety properties not strictly enforced by the C type system.
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                             const FunctionProtoType *Proto) {
  bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
                              isa<CXXMethodDecl>(FDecl);
  bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
                          IsMemberOperatorCall;
  VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
                                                  TheCall->getCallee());
  Expr** Args = TheCall->getArgs();
  unsigned NumArgs = TheCall->getNumArgs();

  Expr *ImplicitThis = nullptr;
  if (IsMemberOperatorCall && !FDecl->isStatic()) {
    // If this is a call to a non-static member operator, hide the first
    // argument from checkCall.
    // FIXME: Our choice of AST representation here is less than ideal.
    ImplicitThis = Args[0];
    ++Args;
    --NumArgs;
  } else if (IsMemberFunction && !FDecl->isStatic())
    ImplicitThis =
        cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();

  if (ImplicitThis) {
    // ImplicitThis may or may not be a pointer, depending on whether . or -> is
    // used.
    QualType ThisType = ImplicitThis->getType();
    if (!ThisType->isPointerType()) {
      assert(!ThisType->isReferenceType());
      ThisType = Context.getPointerType(ThisType);
    }

    QualType ThisTypeFromDecl =
        Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());

    CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
                      ThisTypeFromDecl);
  }

  checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
            IsMemberFunction, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  IdentifierInfo *FnInfo = FDecl->getIdentifier();
  // None of the checks below are needed for functions that don't have
  // simple names (e.g., C++ conversion functions).
  if (!FnInfo)
    return false;

  // Enforce TCB except for builtin calls, which are always allowed.
  if (FDecl->getBuiltinID() == 0)
    CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);

  CheckAbsoluteValueFunction(TheCall, FDecl);
  CheckMaxUnsignedZero(TheCall, FDecl);

  if (getLangOpts().ObjC)
    DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);

  unsigned CMId = FDecl->getMemoryFunctionKind();

  // Handle memory setting and copying functions.
  switch (CMId) {
  case 0:
    // Not a recognized memory function; nothing more to check.
    return false;
  case Builtin::BIstrlcpy: // fallthrough
  case Builtin::BIstrlcat:
    CheckStrlcpycatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIstrncat:
    CheckStrncatArguments(TheCall, FnInfo);
    break;
  case Builtin::BIfree:
    CheckFreeArguments(TheCall);
    break;
  default:
    CheckMemaccessArguments(TheCall, CMId, FnInfo);
  }

  return false;
}

/// Check an Objective-C message send for the same call-safety properties as
/// CheckFunctionCall (format strings, nonnull, variadic args, TCB).
bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
                               ArrayRef<const Expr *> Args) {
  VariadicCallType CallType =
      Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;

  checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
            /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
            CallType);

  CheckTCBEnforcement(lbrac, Method);

  return false;
}

/// Check a call through a function pointer, block pointer, or function-typed
/// variable/field (the callee is a NamedDecl but not a FunctionDecl).
bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                            const FunctionProtoType *Proto) {
  QualType Ty;
  if (const auto *V = dyn_cast<VarDecl>(NDecl))
    Ty = V->getType().getNonReferenceType();
  else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
    Ty = F->getType().getNonReferenceType();
  else
    return false;

  if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
      !Ty->isFunctionProtoType())
    return false;

  VariadicCallType CallType;
  if (!Proto || !Proto->isVariadic()) {
    CallType = VariadicDoesNotApply;
  } else if (Ty->isBlockPointerType()) {
    CallType = VariadicBlock;
  } else { // Ty->isFunctionPointerType()
    CallType = VariadicFunction;
  }

  checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
/// such as function pointers returned from functions.
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
            llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

/// Return true if \p Ordering is a valid C ABI memory ordering for the atomic
/// operation \p Op: loads reject release orderings, stores reject acquire
/// orderings; everything else accepts any valid ordering.
static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (!llvm::isValidAtomicOrderingCABI(Ordering))
    return false;

  auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    // A load never writes, so release semantics are meaningless.
    return OrderingCABI != llvm::AtomicOrderingCABI::release &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    // A store never reads, so acquire/consume semantics are meaningless.
    return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
           OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
           OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;

  default:
    return true;
  }
}

/// Thin wrapper: pull the pieces out of the CallExpr for an overloaded atomic
/// builtin and hand them to BuildAtomicExpr.
ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
  return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()},
                         DRE->getSourceRange(), TheCall->getRParenLoc(), Args,
                         Op);
}

ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
                                 SourceLocation RParenLoc, MultiExprArg Args,
                                 AtomicExpr::AtomicOp Op,
                                 AtomicArgumentOrder ArgOrder) {
  // All the non-OpenCL operations take one of the following forms.
  // The OpenCL operations take the __c11 forms with one extra argument for
  // synchronization scope.
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,

    // C    __c11_atomic_load(A *, int)
    Load,

    // void __atomic_load(A *, CP, int)
    LoadCopy,

    // void __atomic_store(A *, CP, int)
    Copy,

    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,

    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,

    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,

    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,

    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;

  // Per-form argument counts; indexed by the Form enumerators above.
  const unsigned NumForm = GNUCmpXchg + 1;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.

  static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
      && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
      "need to update code for modified forms");
  static_assert(AtomicExpr::AO__c11_atomic_init == 0 &&
                    AtomicExpr::AO__c11_atomic_fetch_min + 1 ==
                        AtomicExpr::AO__atomic_load,
                "need to update code for modified C11 atomics");
  // Classify the builtin family; these ranges rely on the AtomicOp
  // enumerator ordering checked by the static_assert above.
  bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
                  Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
  bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
               Op <= AtomicExpr::AO__hip_atomic_fetch_max;
  bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
                Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
               IsOpenCL;
  bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
             Op == AtomicExpr::AO__atomic_store_n ||
             Op == AtomicExpr::AO__atomic_exchange_n ||
             Op == AtomicExpr::AO__atomic_compare_exchange_n;
  bool IsAddSub = false;

  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    Form = Init;
    break;

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    Form = Load;
    break;

  case AtomicExpr::AO__atomic_load:
    Form = LoadCopy;
    break;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    Form = Copy;
    break;
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    // Add/sub additionally permit pointer and floating-point operands;
    // see the IsAddSub checks below.
    IsAddSub = true;
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Form = Arithmetic;
    break;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_max:
    Form = Arithmetic;
    break;

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
    Form = Xchg;
    break;

  case AtomicExpr::AO__atomic_exchange:
    Form = GNUXchg;
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    Form = C11CmpXchg;
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    Form = GNUCmpXchg;
    break;
  }

  // OpenCL/HIP variants carry a trailing synchronization-scope argument
  // (init has no ordering and no scope, hence the exclusion).
  unsigned AdjustedNumArgs = NumArgs[Form];
  if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init)
    ++AdjustedNumArgs;
  // Check we have the right number of arguments.
  if (Args.size() < AdjustedNumArgs) {
    Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  } else if (Args.size() > AdjustedNumArgs) {
    Diag(Args[AdjustedNumArgs]->getBeginLoc(),
         diag::err_typecheck_call_too_many_args)
        << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
        << ExprRange;
    return ExprError();
  }

  // Inspect the first argument of the atomic operation.
  Expr *Ptr = Args[0];
  ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(Ptr);
  if (ConvertedPtr.isInvalid())
    return ExprError();

  Ptr = ConvertedPtr.get();
  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  // For a __c11 builtin, this should be a pointer to an _Atomic type.
  QualType AtomTy = pointerType->getPointeeType(); // 'A'
  QualType ValType = AtomTy; // 'C'
  if (IsC11) {
    if (!AtomTy->isAtomicType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Writes through a const _Atomic (or an OpenCL constant-address-space
    // object) are ill-formed; loads are exempt from the const check.
    if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
        AtomTy.getAddressSpace() == LangAS::opencl_constant) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
          << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
          << Ptr->getSourceRange();
      return ExprError();
    }
    ValType = AtomTy->castAs<AtomicType>()->getValueType();
  } else if (Form != Load && Form != LoadCopy) {
    if (ValType.isConstQualified()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
          << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
  }

  // For an arithmetic operation, the implied arithmetic must be well-formed.
  if (Form == Arithmetic) {
    // GCC does not enforce these rules for GNU atomics, but we do to help catch
    // trivial type errors.
    auto IsAllowedValueType = [&](QualType ValType) {
      if (ValType->isIntegerType())
        return true;
      if (ValType->isPointerType())
        return true;
      if (!ValType->isFloatingType())
        return false;
      // LLVM Parser does not allow atomicrmw with x86_fp80 type.
      if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
          &Context.getTargetInfo().getLongDoubleFormat() ==
              &llvm::APFloat::x87DoubleExtended())
        return false;
      return true;
    };
    if (IsAddSub && !IsAllowedValueType(ValType)) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Non-add/sub arithmetic (and/or/xor/nand/min/max) requires an integer.
    if (!IsAddSub && !ValType->isIntegerType()) {
      Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int)
          << IsC11 << Ptr->getType() << Ptr->getSourceRange();
      return ExprError();
    }
    // Pointer arithmetic needs a complete pointee to compute element size.
    if (IsC11 && ValType->isPointerType() &&
        RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
                            diag::err_incomplete_type)) {
      return ExprError();
    }
  } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
    // For __atomic_*_n operations, the value type must be a scalar integral or
    // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
        << IsC11 << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
      !AtomTy->isScalarType()) {
    // For GNU atomics, require a trivially-copyable type. This is not part of
    // the GNU atomics specification but we enforce it for consistency with
    // other atomics which generally all require a trivially-copyable type. This
    // is because atomics just copy bits.
    Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
        << Ptr->getType() << Ptr->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    // FIXME: Can this happen? By this point, ValType should be known
    // to be trivially copyable.
    Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
        << ValType << Ptr->getSourceRange();
    return ExprError();
  }

  // All atomic operations have an overload which takes a pointer to a volatile
  // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
  // into the result or the other operands. Similarly atomic_load takes a
  // pointer to a const 'A'.
  ValType.removeLocalVolatile();
  ValType.removeLocalConst();
  QualType ResultType = ValType;
  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
      Form == Init)
    ResultType = Context.VoidTy;
  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
    ResultType = Context.BoolTy;

  // The type of a parameter passed 'by value'. In the GNU atomics, such
  // arguments are actually passed as pointers.
  QualType ByValType = ValType; // 'CP'
  bool IsPassedByAddress = false;
  if (!IsC11 && !IsHIP && !IsN) {
    ByValType = Ptr->getType();
    IsPassedByAddress = true;
  }

  // Normalize the argument order: the AST-order builtins interleave the
  // ordering argument(s) differently from the API order used below.
  SmallVector<Expr *, 5> APIOrderedArgs;
  if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
    APIOrderedArgs.push_back(Args[0]);
    switch (Form) {
    case Init:
    case Load:
      APIOrderedArgs.push_back(Args[1]); // Val1/Order
      break;
    case LoadCopy:
    case Copy:
    case Arithmetic:
    case Xchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case GNUXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[3]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      break;
    case C11CmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    case GNUCmpXchg:
      APIOrderedArgs.push_back(Args[2]); // Val1
      APIOrderedArgs.push_back(Args[4]); // Val2
      APIOrderedArgs.push_back(Args[5]); // Weak
      APIOrderedArgs.push_back(Args[1]); // Order
      APIOrderedArgs.push_back(Args[3]); // OrderFail
      break;
    }
  } else
    APIOrderedArgs.append(Args.begin(), Args.end());

  // The first argument's non-CV pointer type is used to deduce the type of
  // subsequent arguments, except for:
  //   - weak flag (always converted to bool)
  //   - memory order (always converted to int)
  //   - scope (always converted to int)
  for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
    QualType Ty;
    if (i < NumVals[Form] + 1) {
      switch (i) {
      case 0:
        // The first argument is always a pointer. It has a fixed type.
        // It is always dereferenced, a nullptr is undefined.
        CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        // Nothing else to do: we already know all we want about this pointer.
        continue;
      case 1:
        // The second argument is the non-atomic operand. For arithmetic, this
        // is always passed by value, and for a compare_exchange it is always
        // passed by address. For the rest, GNU uses by-address and C11 uses
        // by-value.
        assert(Form != Load);
        if (Form == Arithmetic && ValType->isPointerType())
          Ty = Context.getPointerDiffType();
        else if (Form == Init || Form == Arithmetic)
          Ty = ValType;
        else if (Form == Copy || Form == Xchg) {
          if (IsPassedByAddress) {
            // The value pointer is always dereferenced, a nullptr is undefined.
            CheckNonNullArgument(*this, APIOrderedArgs[i],
                                 ExprRange.getBegin());
          }
          Ty = ByValType;
        } else {
          Expr *ValArg = APIOrderedArgs[i];
          // The value pointer is always dereferenced, a nullptr is undefined.
          CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
          LangAS AS = LangAS::Default;
          // Keep address space of non-atomic pointer type.
          if (const PointerType *PtrTy =
                  ValArg->getType()->getAs<PointerType>()) {
            AS = PtrTy->getPointeeType().getAddressSpace();
          }
          Ty = Context.getPointerType(
              Context.getAddrSpaceQualType(ValType.getUnqualifiedType(), AS));
        }
        break;
      case 2:
        // The third argument to compare_exchange / GNU exchange is the desired
        // value, either by-value (for the C11 and *_n variant) or as a pointer.
        if (IsPassedByAddress)
          CheckNonNullArgument(*this, APIOrderedArgs[i], ExprRange.getBegin());
        Ty = ByValType;
        break;
      case 3:
        // The fourth argument to GNU compare_exchange is a 'weak' flag.
        Ty = Context.BoolTy;
        break;
      }
    } else {
      // The order(s) and scope are always converted to int.
      Ty = Context.IntTy;
    }

    InitializedEntity Entity =
        InitializedEntity::InitializeParameter(Context, Ty, false);
    ExprResult Arg = APIOrderedArgs[i];
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    APIOrderedArgs[i] = Arg.get();
  }

  // Permute the arguments into a 'consistent' order.
  SmallVector<Expr*, 5> SubExprs;
  SubExprs.push_back(Ptr);
  switch (Form) {
  case Init:
    // Note, AtomicExpr::getVal1() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case Load:
    SubExprs.push_back(APIOrderedArgs[1]); // Order
    break;
  case LoadCopy:
  case Copy:
  case Arithmetic:
  case Xchg:
    SubExprs.push_back(APIOrderedArgs[2]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    break;
  case GNUXchg:
    // Note, AtomicExpr::getVal2() has a special case for this atomic.
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case C11CmpXchg:
    SubExprs.push_back(APIOrderedArgs[3]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[4]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    break;
  case GNUCmpXchg:
    SubExprs.push_back(APIOrderedArgs[4]); // Order
    SubExprs.push_back(APIOrderedArgs[1]); // Val1
    SubExprs.push_back(APIOrderedArgs[5]); // OrderFail
    SubExprs.push_back(APIOrderedArgs[2]); // Val2
    SubExprs.push_back(APIOrderedArgs[3]); // Weak
    break;
  }

  // Warn (don't error) on a constant ordering that is invalid for this op;
  // the expression is still built, since the value might be chosen at runtime.
  if (SubExprs.size() >= 2 && Form != Init) {
    if (std::optional<llvm::APSInt> Result =
            SubExprs[1]->getIntegerConstantExpr(Context))
      if (!isValidOrderingForOp(Result->getSExtValue(), Op))
        Diag(SubExprs[1]->getBeginLoc(),
             diag::warn_atomic_op_has_invalid_memory_order)
            << SubExprs[1]->getSourceRange();
  }

  if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
    auto *Scope = Args[Args.size() - 1];
    if (std::optional<llvm::APSInt> Result =
            Scope->getIntegerConstantExpr(Context)) {
      if (!ScopeModel->isValid(Result->getZExtValue()))
        Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
            << Scope->getSourceRange();
    }
    SubExprs.push_back(Scope);
  }

  AtomicExpr *AE = new (Context)
      AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);

  if ((Op == AtomicExpr::AO__c11_atomic_load ||
       Op == AtomicExpr::AO__c11_atomic_store ||
       Op == AtomicExpr::AO__opencl_atomic_load ||
       Op == AtomicExpr::AO__hip_atomic_load ||
       Op == AtomicExpr::AO__opencl_atomic_store ||
       Op == AtomicExpr::AO__hip_atomic_store) &&
      Context.AtomicUsesUnsupportedLibcall(AE))
    Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
        << ((Op == AtomicExpr::AO__c11_atomic_load ||
             Op == AtomicExpr::AO__opencl_atomic_load ||
             Op == AtomicExpr::AO__hip_atomic_load)
                ? 0
                : 1);

  if (ValType->isBitIntType()) {
    Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
    return ExprError();
  }

  return AE;
}

/// checkBuiltinArgument - Given a call to a builtin function, perform
/// normal type-checking on the given argument, updating the call in
/// place. This is useful when a builtin function requires custom
/// type-checking for some of its arguments but not necessarily all of
/// them.
///
/// Returns true on error.
6714static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { 6715 FunctionDecl *Fn = E->getDirectCallee(); 6716 assert(Fn && "builtin call without direct callee!"); 6717 6718 ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); 6719 InitializedEntity Entity = 6720 InitializedEntity::InitializeParameter(S.Context, Param); 6721 6722 ExprResult Arg = E->getArg(ArgIndex); 6723 Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); 6724 if (Arg.isInvalid()) 6725 return true; 6726 6727 E->setArg(ArgIndex, Arg.get()); 6728 return false; 6729} 6730 6731/// We have a call to a function like __sync_fetch_and_add, which is an 6732/// overloaded function based on the pointer type of its first argument. 6733/// The main BuildCallExpr routines have already promoted the types of 6734/// arguments because all of these calls are prototyped as void(...). 6735/// 6736/// This function goes through and does final semantic checking for these 6737/// builtins, as well as generating any warnings. 6738ExprResult 6739Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { 6740 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get()); 6741 Expr *Callee = TheCall->getCallee(); 6742 DeclRefExpr *DRE = cast<DeclRefExpr>(Callee->IgnoreParenCasts()); 6743 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 6744 6745 // Ensure that we have at least one argument to do type inference from. 6746 if (TheCall->getNumArgs() < 1) { 6747 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 6748 << 0 << 1 << TheCall->getNumArgs() << Callee->getSourceRange(); 6749 return ExprError(); 6750 } 6751 6752 // Inspect the first argument of the atomic builtin. This should always be 6753 // a pointer type, whose element is an integral scalar or pointer type. 6754 // Because it is a pointer type, we don't have to worry about any implicit 6755 // casts here. 6756 // FIXME: We don't allow floating point scalars as input. 
6757 Expr *FirstArg = TheCall->getArg(0); 6758 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); 6759 if (FirstArgResult.isInvalid()) 6760 return ExprError(); 6761 FirstArg = FirstArgResult.get(); 6762 TheCall->setArg(0, FirstArg); 6763 6764 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>(); 6765 if (!pointerType) { 6766 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer) 6767 << FirstArg->getType() << FirstArg->getSourceRange(); 6768 return ExprError(); 6769 } 6770 6771 QualType ValType = pointerType->getPointeeType(); 6772 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 6773 !ValType->isBlockPointerType()) { 6774 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr) 6775 << FirstArg->getType() << FirstArg->getSourceRange(); 6776 return ExprError(); 6777 } 6778 6779 if (ValType.isConstQualified()) { 6780 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const) 6781 << FirstArg->getType() << FirstArg->getSourceRange(); 6782 return ExprError(); 6783 } 6784 6785 switch (ValType.getObjCLifetime()) { 6786 case Qualifiers::OCL_None: 6787 case Qualifiers::OCL_ExplicitNone: 6788 // okay 6789 break; 6790 6791 case Qualifiers::OCL_Weak: 6792 case Qualifiers::OCL_Strong: 6793 case Qualifiers::OCL_Autoreleasing: 6794 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership) 6795 << ValType << FirstArg->getSourceRange(); 6796 return ExprError(); 6797 } 6798 6799 // Strip any qualifiers off ValType. 6800 ValType = ValType.getUnqualifiedType(); 6801 6802 // The majority of builtins return a value, but a few have special return 6803 // types, so allow them to override appropriately below. 6804 QualType ResultType = ValType; 6805 6806 // We need to figure out which concrete builtin this maps onto. For example, 6807 // __sync_fetch_and_add with a 2 byte object turns into 6808 // __sync_fetch_and_add_2. 
6809#define BUILTIN_ROW(x) \ 6810 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ 6811 Builtin::BI##x##_8, Builtin::BI##x##_16 } 6812 6813 static const unsigned BuiltinIndices[][5] = { 6814 BUILTIN_ROW(__sync_fetch_and_add), 6815 BUILTIN_ROW(__sync_fetch_and_sub), 6816 BUILTIN_ROW(__sync_fetch_and_or), 6817 BUILTIN_ROW(__sync_fetch_and_and), 6818 BUILTIN_ROW(__sync_fetch_and_xor), 6819 BUILTIN_ROW(__sync_fetch_and_nand), 6820 6821 BUILTIN_ROW(__sync_add_and_fetch), 6822 BUILTIN_ROW(__sync_sub_and_fetch), 6823 BUILTIN_ROW(__sync_and_and_fetch), 6824 BUILTIN_ROW(__sync_or_and_fetch), 6825 BUILTIN_ROW(__sync_xor_and_fetch), 6826 BUILTIN_ROW(__sync_nand_and_fetch), 6827 6828 BUILTIN_ROW(__sync_val_compare_and_swap), 6829 BUILTIN_ROW(__sync_bool_compare_and_swap), 6830 BUILTIN_ROW(__sync_lock_test_and_set), 6831 BUILTIN_ROW(__sync_lock_release), 6832 BUILTIN_ROW(__sync_swap) 6833 }; 6834#undef BUILTIN_ROW 6835 6836 // Determine the index of the size. 6837 unsigned SizeIndex; 6838 switch (Context.getTypeSizeInChars(ValType).getQuantity()) { 6839 case 1: SizeIndex = 0; break; 6840 case 2: SizeIndex = 1; break; 6841 case 4: SizeIndex = 2; break; 6842 case 8: SizeIndex = 3; break; 6843 case 16: SizeIndex = 4; break; 6844 default: 6845 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) 6846 << FirstArg->getType() << FirstArg->getSourceRange(); 6847 return ExprError(); 6848 } 6849 6850 // Each of these builtins has one pointer argument, followed by some number of 6851 // values (0, 1 or 2) followed by a potentially empty varags list of stuff 6852 // that we ignore. Find out which row of BuiltinIndices to read from as well 6853 // as the number of fixed args. 
6854 unsigned BuiltinID = FDecl->getBuiltinID(); 6855 unsigned BuiltinIndex, NumFixed = 1; 6856 bool WarnAboutSemanticsChange = false; 6857 switch (BuiltinID) { 6858 default: llvm_unreachable("Unknown overloaded atomic builtin!"); 6859 case Builtin::BI__sync_fetch_and_add: 6860 case Builtin::BI__sync_fetch_and_add_1: 6861 case Builtin::BI__sync_fetch_and_add_2: 6862 case Builtin::BI__sync_fetch_and_add_4: 6863 case Builtin::BI__sync_fetch_and_add_8: 6864 case Builtin::BI__sync_fetch_and_add_16: 6865 BuiltinIndex = 0; 6866 break; 6867 6868 case Builtin::BI__sync_fetch_and_sub: 6869 case Builtin::BI__sync_fetch_and_sub_1: 6870 case Builtin::BI__sync_fetch_and_sub_2: 6871 case Builtin::BI__sync_fetch_and_sub_4: 6872 case Builtin::BI__sync_fetch_and_sub_8: 6873 case Builtin::BI__sync_fetch_and_sub_16: 6874 BuiltinIndex = 1; 6875 break; 6876 6877 case Builtin::BI__sync_fetch_and_or: 6878 case Builtin::BI__sync_fetch_and_or_1: 6879 case Builtin::BI__sync_fetch_and_or_2: 6880 case Builtin::BI__sync_fetch_and_or_4: 6881 case Builtin::BI__sync_fetch_and_or_8: 6882 case Builtin::BI__sync_fetch_and_or_16: 6883 BuiltinIndex = 2; 6884 break; 6885 6886 case Builtin::BI__sync_fetch_and_and: 6887 case Builtin::BI__sync_fetch_and_and_1: 6888 case Builtin::BI__sync_fetch_and_and_2: 6889 case Builtin::BI__sync_fetch_and_and_4: 6890 case Builtin::BI__sync_fetch_and_and_8: 6891 case Builtin::BI__sync_fetch_and_and_16: 6892 BuiltinIndex = 3; 6893 break; 6894 6895 case Builtin::BI__sync_fetch_and_xor: 6896 case Builtin::BI__sync_fetch_and_xor_1: 6897 case Builtin::BI__sync_fetch_and_xor_2: 6898 case Builtin::BI__sync_fetch_and_xor_4: 6899 case Builtin::BI__sync_fetch_and_xor_8: 6900 case Builtin::BI__sync_fetch_and_xor_16: 6901 BuiltinIndex = 4; 6902 break; 6903 6904 case Builtin::BI__sync_fetch_and_nand: 6905 case Builtin::BI__sync_fetch_and_nand_1: 6906 case Builtin::BI__sync_fetch_and_nand_2: 6907 case Builtin::BI__sync_fetch_and_nand_4: 6908 case Builtin::BI__sync_fetch_and_nand_8: 
6909 case Builtin::BI__sync_fetch_and_nand_16: 6910 BuiltinIndex = 5; 6911 WarnAboutSemanticsChange = true; 6912 break; 6913 6914 case Builtin::BI__sync_add_and_fetch: 6915 case Builtin::BI__sync_add_and_fetch_1: 6916 case Builtin::BI__sync_add_and_fetch_2: 6917 case Builtin::BI__sync_add_and_fetch_4: 6918 case Builtin::BI__sync_add_and_fetch_8: 6919 case Builtin::BI__sync_add_and_fetch_16: 6920 BuiltinIndex = 6; 6921 break; 6922 6923 case Builtin::BI__sync_sub_and_fetch: 6924 case Builtin::BI__sync_sub_and_fetch_1: 6925 case Builtin::BI__sync_sub_and_fetch_2: 6926 case Builtin::BI__sync_sub_and_fetch_4: 6927 case Builtin::BI__sync_sub_and_fetch_8: 6928 case Builtin::BI__sync_sub_and_fetch_16: 6929 BuiltinIndex = 7; 6930 break; 6931 6932 case Builtin::BI__sync_and_and_fetch: 6933 case Builtin::BI__sync_and_and_fetch_1: 6934 case Builtin::BI__sync_and_and_fetch_2: 6935 case Builtin::BI__sync_and_and_fetch_4: 6936 case Builtin::BI__sync_and_and_fetch_8: 6937 case Builtin::BI__sync_and_and_fetch_16: 6938 BuiltinIndex = 8; 6939 break; 6940 6941 case Builtin::BI__sync_or_and_fetch: 6942 case Builtin::BI__sync_or_and_fetch_1: 6943 case Builtin::BI__sync_or_and_fetch_2: 6944 case Builtin::BI__sync_or_and_fetch_4: 6945 case Builtin::BI__sync_or_and_fetch_8: 6946 case Builtin::BI__sync_or_and_fetch_16: 6947 BuiltinIndex = 9; 6948 break; 6949 6950 case Builtin::BI__sync_xor_and_fetch: 6951 case Builtin::BI__sync_xor_and_fetch_1: 6952 case Builtin::BI__sync_xor_and_fetch_2: 6953 case Builtin::BI__sync_xor_and_fetch_4: 6954 case Builtin::BI__sync_xor_and_fetch_8: 6955 case Builtin::BI__sync_xor_and_fetch_16: 6956 BuiltinIndex = 10; 6957 break; 6958 6959 case Builtin::BI__sync_nand_and_fetch: 6960 case Builtin::BI__sync_nand_and_fetch_1: 6961 case Builtin::BI__sync_nand_and_fetch_2: 6962 case Builtin::BI__sync_nand_and_fetch_4: 6963 case Builtin::BI__sync_nand_and_fetch_8: 6964 case Builtin::BI__sync_nand_and_fetch_16: 6965 BuiltinIndex = 11; 6966 WarnAboutSemanticsChange = 
true; 6967 break; 6968 6969 case Builtin::BI__sync_val_compare_and_swap: 6970 case Builtin::BI__sync_val_compare_and_swap_1: 6971 case Builtin::BI__sync_val_compare_and_swap_2: 6972 case Builtin::BI__sync_val_compare_and_swap_4: 6973 case Builtin::BI__sync_val_compare_and_swap_8: 6974 case Builtin::BI__sync_val_compare_and_swap_16: 6975 BuiltinIndex = 12; 6976 NumFixed = 2; 6977 break; 6978 6979 case Builtin::BI__sync_bool_compare_and_swap: 6980 case Builtin::BI__sync_bool_compare_and_swap_1: 6981 case Builtin::BI__sync_bool_compare_and_swap_2: 6982 case Builtin::BI__sync_bool_compare_and_swap_4: 6983 case Builtin::BI__sync_bool_compare_and_swap_8: 6984 case Builtin::BI__sync_bool_compare_and_swap_16: 6985 BuiltinIndex = 13; 6986 NumFixed = 2; 6987 ResultType = Context.BoolTy; 6988 break; 6989 6990 case Builtin::BI__sync_lock_test_and_set: 6991 case Builtin::BI__sync_lock_test_and_set_1: 6992 case Builtin::BI__sync_lock_test_and_set_2: 6993 case Builtin::BI__sync_lock_test_and_set_4: 6994 case Builtin::BI__sync_lock_test_and_set_8: 6995 case Builtin::BI__sync_lock_test_and_set_16: 6996 BuiltinIndex = 14; 6997 break; 6998 6999 case Builtin::BI__sync_lock_release: 7000 case Builtin::BI__sync_lock_release_1: 7001 case Builtin::BI__sync_lock_release_2: 7002 case Builtin::BI__sync_lock_release_4: 7003 case Builtin::BI__sync_lock_release_8: 7004 case Builtin::BI__sync_lock_release_16: 7005 BuiltinIndex = 15; 7006 NumFixed = 0; 7007 ResultType = Context.VoidTy; 7008 break; 7009 7010 case Builtin::BI__sync_swap: 7011 case Builtin::BI__sync_swap_1: 7012 case Builtin::BI__sync_swap_2: 7013 case Builtin::BI__sync_swap_4: 7014 case Builtin::BI__sync_swap_8: 7015 case Builtin::BI__sync_swap_16: 7016 BuiltinIndex = 16; 7017 break; 7018 } 7019 7020 // Now that we know how many fixed arguments we expect, first check that we 7021 // have at least that many. 
7022 if (TheCall->getNumArgs() < 1+NumFixed) { 7023 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) 7024 << 0 << 1 + NumFixed << TheCall->getNumArgs() 7025 << Callee->getSourceRange(); 7026 return ExprError(); 7027 } 7028 7029 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst) 7030 << Callee->getSourceRange(); 7031 7032 if (WarnAboutSemanticsChange) { 7033 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change) 7034 << Callee->getSourceRange(); 7035 } 7036 7037 // Get the decl for the concrete builtin from this, we can tell what the 7038 // concrete integer type we should convert to is. 7039 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; 7040 StringRef NewBuiltinName = Context.BuiltinInfo.getName(NewBuiltinID); 7041 FunctionDecl *NewBuiltinDecl; 7042 if (NewBuiltinID == BuiltinID) 7043 NewBuiltinDecl = FDecl; 7044 else { 7045 // Perform builtin lookup to avoid redeclaring it. 7046 DeclarationName DN(&Context.Idents.get(NewBuiltinName)); 7047 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName); 7048 LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); 7049 assert(Res.getFoundDecl()); 7050 NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); 7051 if (!NewBuiltinDecl) 7052 return ExprError(); 7053 } 7054 7055 // The first argument --- the pointer --- has a fixed type; we 7056 // deduce the types of the rest of the arguments accordingly. Walk 7057 // the remaining arguments, converting them to the deduced value type. 7058 for (unsigned i = 0; i != NumFixed; ++i) { 7059 ExprResult Arg = TheCall->getArg(i+1); 7060 7061 // GCC does an implicit conversion to the pointer or integer ValType. This 7062 // can fail in some cases (1i -> int**), check for this error case now. 7063 // Initialize the argument. 
7064 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, 7065 ValType, /*consume*/ false); 7066 Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); 7067 if (Arg.isInvalid()) 7068 return ExprError(); 7069 7070 // Okay, we have something that *can* be converted to the right type. Check 7071 // to see if there is a potentially weird extension going on here. This can 7072 // happen when you do an atomic operation on something like an char* and 7073 // pass in 42. The 42 gets converted to char. This is even more strange 7074 // for things like 45.123 -> char, etc. 7075 // FIXME: Do this check. 7076 TheCall->setArg(i+1, Arg.get()); 7077 } 7078 7079 // Create a new DeclRefExpr to refer to the new decl. 7080 DeclRefExpr *NewDRE = DeclRefExpr::Create( 7081 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl, 7082 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy, 7083 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse()); 7084 7085 // Set the callee in the CallExpr. 7086 // FIXME: This loses syntactic information. 7087 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); 7088 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, 7089 CK_BuiltinFnToFnPtr); 7090 TheCall->setCallee(PromotedCall.get()); 7091 7092 // Change the result type of the call to match the original value type. This 7093 // is arbitrary, but the codegen for these builtins ins design to handle it 7094 // gracefully. 7095 TheCall->setType(ResultType); 7096 7097 // Prohibit problematic uses of bit-precise integer types with atomic 7098 // builtins. The arguments would have already been converted to the first 7099 // argument's type, so only need to check the first argument. 
7100 const auto *BitIntValType = ValType->getAs<BitIntType>(); 7101 if (BitIntValType && !llvm::isPowerOf2_64(BitIntValType->getNumBits())) { 7102 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size); 7103 return ExprError(); 7104 } 7105 7106 return TheCallResult; 7107} 7108 7109/// SemaBuiltinNontemporalOverloaded - We have a call to 7110/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an 7111/// overloaded function based on the pointer type of its last argument. 7112/// 7113/// This function goes through and does final semantic checking for these 7114/// builtins. 7115ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) { 7116 CallExpr *TheCall = (CallExpr *)TheCallResult.get(); 7117 DeclRefExpr *DRE = 7118 cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); 7119 FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); 7120 unsigned BuiltinID = FDecl->getBuiltinID(); 7121 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store || 7122 BuiltinID == Builtin::BI__builtin_nontemporal_load) && 7123 "Unexpected nontemporal load/store builtin!"); 7124 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store; 7125 unsigned numArgs = isStore ? 2 : 1; 7126 7127 // Ensure that we have the proper number of arguments. 7128 if (checkArgCount(*this, TheCall, numArgs)) 7129 return ExprError(); 7130 7131 // Inspect the last argument of the nontemporal builtin. This should always 7132 // be a pointer type, from which we imply the type of the memory access. 7133 // Because it is a pointer type, we don't have to worry about any implicit 7134 // casts here. 
7135 Expr *PointerArg = TheCall->getArg(numArgs - 1); 7136 ExprResult PointerArgResult = 7137 DefaultFunctionArrayLvalueConversion(PointerArg); 7138 7139 if (PointerArgResult.isInvalid()) 7140 return ExprError(); 7141 PointerArg = PointerArgResult.get(); 7142 TheCall->setArg(numArgs - 1, PointerArg); 7143 7144 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); 7145 if (!pointerType) { 7146 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer) 7147 << PointerArg->getType() << PointerArg->getSourceRange(); 7148 return ExprError(); 7149 } 7150 7151 QualType ValType = pointerType->getPointeeType(); 7152 7153 // Strip any qualifiers off ValType. 7154 ValType = ValType.getUnqualifiedType(); 7155 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && 7156 !ValType->isBlockPointerType() && !ValType->isFloatingType() && 7157 !ValType->isVectorType()) { 7158 Diag(DRE->getBeginLoc(), 7159 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector) 7160 << PointerArg->getType() << PointerArg->getSourceRange(); 7161 return ExprError(); 7162 } 7163 7164 if (!isStore) { 7165 TheCall->setType(ValType); 7166 return TheCallResult; 7167 } 7168 7169 ExprResult ValArg = TheCall->getArg(0); 7170 InitializedEntity Entity = InitializedEntity::InitializeParameter( 7171 Context, ValType, /*consume*/ false); 7172 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); 7173 if (ValArg.isInvalid()) 7174 return ExprError(); 7175 7176 TheCall->setArg(0, ValArg.get()); 7177 TheCall->setType(Context.VoidTy); 7178 return TheCallResult; 7179} 7180 7181/// CheckObjCString - Checks that the argument to the builtin 7182/// CFString constructor is correct 7183/// Note: It might also make sense to do the UTF-16 conversion here (would 7184/// simplify the backend). 
7185bool Sema::CheckObjCString(Expr *Arg) { 7186 Arg = Arg->IgnoreParenCasts(); 7187 StringLiteral *Literal = dyn_cast<StringLiteral>(Arg); 7188 7189 if (!Literal || !Literal->isOrdinary()) { 7190 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant) 7191 << Arg->getSourceRange(); 7192 return true; 7193 } 7194 7195 if (Literal->containsNonAsciiOrNull()) { 7196 StringRef String = Literal->getString(); 7197 unsigned NumBytes = String.size(); 7198 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes); 7199 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data(); 7200 llvm::UTF16 *ToPtr = &ToBuf[0]; 7201 7202 llvm::ConversionResult Result = 7203 llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr, 7204 ToPtr + NumBytes, llvm::strictConversion); 7205 // Check for conversion failure. 7206 if (Result != llvm::conversionOK) 7207 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated) 7208 << Arg->getSourceRange(); 7209 } 7210 return false; 7211} 7212 7213/// CheckObjCString - Checks that the format string argument to the os_log() 7214/// and os_trace() functions is correct, and converts it to const char *. 
ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
  Arg = Arg->IgnoreParenCasts();
  auto *Literal = dyn_cast<StringLiteral>(Arg);
  if (!Literal) {
    // An Objective-C string literal (@"...") wraps an ordinary string
    // literal; unwrap it so the checks below see the underlying literal.
    if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
      Literal = ObjcLiteral->getString();
    }
  }

  // Only ordinary or UTF-8 string literals are acceptable format strings.
  if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
    return ExprError(
        Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
        << Arg->getSourceRange());
  }

  // Convert the literal to 'const char *' as the os_log machinery expects.
  ExprResult Result(Literal);
  QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(Context, ResultTy, false);
  Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
  return Result;
}

/// Check that the user is calling the appropriate va_start builtin for the
/// target and calling convention. Returns true (after emitting a diagnostic)
/// if the builtin is invalid for the current target/ABI, false otherwise.
static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
  const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
  bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
  bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
                    TT.getArch() == llvm::Triple::aarch64_32);
  bool IsWindows = TT.isOSWindows();
  bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
  if (IsX64 || IsAArch64) {
    // The check is against the calling convention of the *enclosing*
    // function, defaulting to CC_C when there is no current function.
    CallingConv CC = CC_C;
    if (const FunctionDecl *FD = S.getCurFunctionDecl())
      CC = FD->getType()->castAs<FunctionType>()->getCallConv();
    if (IsMSVAStart) {
      // Don't allow this in System V ABI functions.
      if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_ms_va_start_used_in_sysv_function);
    } else {
      // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
      // On x64 Windows, don't allow this in System V ABI functions.
      // (Yes, that means there's no corresponding way to support variadic
      // System V ABI functions on Windows.)
      if ((IsWindows && CC == CC_X86_64SysV) ||
          (!IsWindows && CC == CC_Win64))
        return S.Diag(Fn->getBeginLoc(),
                      diag::err_va_start_used_in_wrong_abi_function)
               << !IsWindows;
    }
    return false;
  }

  // __builtin_ms_va_start is only supported on x86-64 and AArch64 targets.
  if (IsMSVAStart)
    return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
  return false;
}

/// Check that a va_start-style builtin appears inside a variadic function,
/// block, or Objective-C method. On success, optionally reports the caller's
/// last named parameter through \p LastParam (nullptr if there are no named
/// parameters). Returns true (after diagnosing) on failure.
static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
                                             ParmVarDecl **LastParam = nullptr) {
  // Determine whether the current function, block, or obj-c method is variadic
  // and get its parameter list.
  bool IsVariadic = false;
  ArrayRef<ParmVarDecl *> Params;
  DeclContext *Caller = S.CurContext;
  if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
    IsVariadic = Block->isVariadic();
    Params = Block->parameters();
  } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
    IsVariadic = FD->isVariadic();
    Params = FD->parameters();
  } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
    IsVariadic = MD->isVariadic();
    // FIXME: This isn't correct for methods (results in bogus warning).
    Params = MD->parameters();
  } else if (isa<CapturedDecl>(Caller)) {
    // We don't support va_start in a CapturedDecl.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
    return true;
  } else {
    // This must be some other declcontext that parses exprs.
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
    return true;
  }

  if (!IsVariadic) {
    S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
    return true;
  }

  if (LastParam)
    *LastParam = Params.empty() ? nullptr : Params.back();

  return false;
}

/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity.
/// Emit an error and return true on failure; return false on success.
bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();

  if (checkVAStartABI(*this, BuiltinID, Fn))
    return true;

  // In C2x mode, va_start only needs one argument. However, the builtin still
  // requires two arguments (which matches the behavior of the GCC builtin),
  // <stdarg.h> passes `0` as the second argument in C2x mode.
  if (checkArgCount(*this, TheCall, 2))
    return true;

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, TheCall, 0))
    return true;

  // Check that the current function is variadic, and get its last parameter.
  ParmVarDecl *LastParam;
  if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
    return true;

  // Verify that the second argument to the builtin is the last argument of the
  // current function or method. In C2x mode, if the second argument is an
  // integer constant expression with value 0, then we don't bother with this
  // check.
  bool SecondArgIsLastNamedArgument = false;
  const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
  if (std::optional<llvm::APSInt> Val =
          TheCall->getArg(1)->getIntegerConstantExpr(Context);
      Val && LangOpts.C2x && *Val == 0)
    return false;

  // These are valid if SecondArgIsLastNamedArgument is false after the next
  // block.
  QualType Type;
  SourceLocation ParamLoc;
  bool IsCRegister = false;

  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
    if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
      SecondArgIsLastNamedArgument = PV == LastParam;

      Type = PV->getType();
      ParamLoc = PV->getLocation();
      // 'register' parameters are only problematic in C; C++ deprecated the
      // specifier without the undefined-behavior implication.
      IsCRegister =
          PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
    }
  }

  if (!SecondArgIsLastNamedArgument)
    Diag(TheCall->getArg(1)->getBeginLoc(),
         diag::warn_second_arg_of_va_start_not_last_named_param);
  else if (IsCRegister || Type->isReferenceType() ||
           Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
             // Promotable integers are UB, but enumerations need a bit of
             // extra checking to see what their promotable type actually is.
             if (!Context.isPromotableIntegerType(Type))
               return false;
             if (!Type->isEnumeralType())
               return true;
             const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
             return !(ED &&
                      Context.typesAreCompatible(ED->getPromotionType(), Type));
           }()) {
    // Reason selects the %select in the diagnostic: 0 = promotable type,
    // 1 = reference type, 2 = 'register' in C.
    unsigned Reason = 0;
    if (Type->isReferenceType()) Reason = 1;
    else if (IsCRegister)        Reason = 2;
    Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
    Diag(ParamLoc, diag::note_parameter_type) << Type;
  }

  return false;
}

/// Check a call to the Microsoft '__va_start' builtin, which (unlike
/// __builtin_va_start) receives the address and slot size of the last named
/// argument explicitly — see the prototype comment inside.
bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
  // In C++, the second/fourth argument must point at (cv-unqualified) char;
  // in C, any pointer is tolerated since aliasing through 'char *' is
  // required on AArch64 at least.
  auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
    const LangOptions &LO = getLangOpts();

    if (LO.CPlusPlus)
      return Arg->getType()
                 .getCanonicalType()
                 .getTypePtr()
                 ->getPointeeType()
                 .withoutLocalFastQualifiers() == Context.CharTy;

    // In C, allow aliasing through `char *`, this is required for AArch64 at
    // least.
    return true;
  };

  // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
  //                 const char *named_addr);

  Expr *Func = Call->getCallee();

  if (Call->getNumArgs() < 3)
    return Diag(Call->getEndLoc(),
                diag::err_typecheck_call_too_few_args_at_least)
           << 0 /*function call*/ << 3 << Call->getNumArgs();

  // Type-check the first argument normally.
  if (checkBuiltinArgument(*this, Call, 0))
    return true;

  // Check that the current function is variadic.
  if (checkVAStartIsInVariadicFunction(*this, Func))
    return true;

  // __va_start on Windows does not validate the parameter qualifiers

  const Expr *Arg1 = Call->getArg(1)->IgnoreParens();
  const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();

  const Expr *Arg2 = Call->getArg(2)->IgnoreParens();
  const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();

  const QualType &ConstCharPtrTy =
      Context.getPointerType(Context.CharTy.withConst());
  if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
    Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 2 << Arg1->getType() << ConstCharPtrTy;

  // The slot-size argument must be exactly size_t.
  const QualType SizeTy = Context.getSizeType();
  if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
    Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
        << Arg2->getType() << SizeTy << 1 /* different class */
        << 0 /* qualifier difference */
        << 3 /* parameter mismatch */
        << 3 << Arg2->getType() << SizeTy;

  return false;
}

/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
/// friends. This is declared to take (...), so we have to check everything.
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(
      OrigArg0, OrigArg1, TheCall->getExprLoc(), ACK_Comparison);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  // Dependent arguments cannot be checked until instantiation.
  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getBeginLoc(),
                diag::err_typecheck_call_invalid_ordered_compare)
           << OrigArg0.get()->getType() << OrigArg1.get()->getType()
           << SourceRange(OrigArg0.get()->getBeginLoc(),
                          OrigArg1.get()->getEndLoc());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
/// to check everything. We expect the last argument to be a floating point
/// value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (checkArgCount(*this, TheCall, NumArgs))
    return true;

  // __builtin_fpclassify is the only case where NumArgs != 1, so we can count
  // on all preceding parameters just being int. Try all of those.
  for (unsigned i = 0; i < NumArgs - 1; ++i) {
    Expr *Arg = TheCall->getArg(i);

    if (Arg->isTypeDependent())
      return false;

    ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);

    if (Res.isInvalid())
      return true;
    TheCall->setArg(i, Res.get());
  }

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // Usual Unary Conversions will convert half to float, which we want for
  // machines that use fp16 conversion intrinsics. Else, we want to leave the
  // type how it is, but do normal L->Rvalue conversions.
  if (Context.getTargetInfo().useFP16ConversionIntrinsics())
    OrigArg = UsualUnaryConversions(OrigArg).get();
  else
    OrigArg = DefaultFunctionArrayLvalueConversion(OrigArg).get();
  TheCall->setArg(NumArgs - 1, OrigArg);

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getBeginLoc(),
                diag::err_typecheck_call_invalid_unary_fp)
           << OrigArg->getType() << OrigArg->getSourceRange();

  return false;
}

/// Perform semantic analysis for a call to __builtin_complex.
bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  bool Dependent = false;
  for (unsigned I = 0; I != 2; ++I) {
    Expr *Arg = TheCall->getArg(I);
    QualType T = Arg->getType();
    if (T->isDependentType()) {
      Dependent = true;
      continue;
    }

    // Despite supporting _Complex int, GCC requires a real floating point type
    // for the operands of __builtin_complex.
    if (!T->isRealFloatingType()) {
      return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
             << Arg->getType() << Arg->getSourceRange();
    }

    ExprResult Converted = DefaultLvalueConversion(Arg);
    if (Converted.isInvalid())
      return true;
    TheCall->setArg(I, Converted.get());
  }

  // With any dependent operand the whole call stays dependent.
  if (Dependent) {
    TheCall->setType(Context.DependentTy);
    return false;
  }

  Expr *Real = TheCall->getArg(0);
  Expr *Imag = TheCall->getArg(1);
  // Both components must have exactly the same type.
  if (!Context.hasSameType(Real->getType(), Imag->getType())) {
    return Diag(Real->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << Real->getType() << Imag->getType()
           << Real->getSourceRange() << Imag->getSourceRange();
  }

  // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
  // don't allow this builtin to form those types either.
  // FIXME: Should we allow these types?
  if (Real->getType()->isFloat16Type())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "_Float16";
  if (Real->getType()->isHalfType())
    return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
        << "half";

  TheCall->setType(Context.getComplexType(Real->getType()));
  return false;
}

// Customized Sema Checking for VSX builtins that have the following signature:
// vector [...] builtinName(vector [...], vector [...], const int);
// Which takes the same type of vectors (any legal vector type) for the first
// two arguments and takes compile time constant for the third argument.
7594// Example builtins are : 7595// vector double vec_xxpermdi(vector double, vector double, int); 7596// vector short vec_xxsldwi(vector short, vector short, int); 7597bool Sema::SemaBuiltinVSX(CallExpr *TheCall) { 7598 unsigned ExpectedNumArgs = 3; 7599 if (checkArgCount(*this, TheCall, ExpectedNumArgs)) 7600 return true; 7601 7602 // Check the third argument is a compile time constant 7603 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context)) 7604 return Diag(TheCall->getBeginLoc(), 7605 diag::err_vsx_builtin_nonconstant_argument) 7606 << 3 /* argument index */ << TheCall->getDirectCallee() 7607 << SourceRange(TheCall->getArg(2)->getBeginLoc(), 7608 TheCall->getArg(2)->getEndLoc()); 7609 7610 QualType Arg1Ty = TheCall->getArg(0)->getType(); 7611 QualType Arg2Ty = TheCall->getArg(1)->getType(); 7612 7613 // Check the type of argument 1 and argument 2 are vectors. 7614 SourceLocation BuiltinLoc = TheCall->getBeginLoc(); 7615 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) || 7616 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) { 7617 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector) 7618 << TheCall->getDirectCallee() 7619 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7620 TheCall->getArg(1)->getEndLoc()); 7621 } 7622 7623 // Check the first two arguments are the same type. 7624 if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) { 7625 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector) 7626 << TheCall->getDirectCallee() 7627 << SourceRange(TheCall->getArg(0)->getBeginLoc(), 7628 TheCall->getArg(1)->getEndLoc()); 7629 } 7630 7631 // When default clang type checking is turned off and the customized type 7632 // checking is used, the returning type of the function must be explicitly 7633 // set. Otherwise it is _Bool by default. 7634 TheCall->setType(Arg1Ty); 7635 7636 return false; 7637} 7638 7639/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector. 
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return ExprError(Diag(TheCall->getEndLoc(),
                          diag::err_typecheck_call_too_few_args_at_least)
                     << 0 /*function call*/ << 2 << TheCall->getNumArgs()
                     << TheCall->getSourceRange());

  // Determine which of the following types of shufflevector we're checking:
  // 1) unary, vector mask: (lhs, mask)
  // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
  QualType resType = TheCall->getArg(0)->getType();
  unsigned numElements = 0;

  if (!TheCall->getArg(0)->isTypeDependent() &&
      !TheCall->getArg(1)->isTypeDependent()) {
    QualType LHSType = TheCall->getArg(0)->getType();
    QualType RHSType = TheCall->getArg(1)->getType();

    if (!LHSType->isVectorType() || !RHSType->isVectorType())
      return ExprError(
          Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
          << TheCall->getDirectCallee()
          << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                         TheCall->getArg(1)->getEndLoc()));

    numElements = LHSType->castAs<VectorType>()->getNumElements();
    unsigned numResElements = TheCall->getNumArgs() - 2;

    // Check to see if we have a call with 2 vector arguments, the unary shuffle
    // with mask. If so, verify that RHS is an integer vector type with the
    // same number of elts as lhs.
    if (TheCall->getNumArgs() == 2) {
      if (!RHSType->hasIntegerRepresentation() ||
          RHSType->castAs<VectorType>()->getNumElements() != numElements)
        return ExprError(Diag(TheCall->getBeginLoc(),
                              diag::err_vec_builtin_incompatible_vector)
                         << TheCall->getDirectCallee()
                         << SourceRange(TheCall->getArg(1)->getBeginLoc(),
                                        TheCall->getArg(1)->getEndLoc()));
    } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_vec_builtin_incompatible_vector)
                       << TheCall->getDirectCallee()
                       << SourceRange(TheCall->getArg(0)->getBeginLoc(),
                                      TheCall->getArg(1)->getEndLoc()));
    } else if (numElements != numResElements) {
      // The result has one element per index argument; build a vector of that
      // width from the LHS element type.
      QualType eltType = LHSType->castAs<VectorType>()->getElementType();
      resType = Context.getVectorType(eltType, numResElements,
                                      VectorType::GenericVector);
    }
  }

  // Every index argument must be an integer constant expression.
  for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
    if (TheCall->getArg(i)->isTypeDependent() ||
        TheCall->getArg(i)->isValueDependent())
      continue;

    std::optional<llvm::APSInt> Result;
    if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_nonconstant_argument)
                       << TheCall->getArg(i)->getSourceRange());

    // Allow -1 which will be translated to undef in the IR.
    if (Result->isSigned() && Result->isAllOnes())
      continue;

    // Indices select from the concatenation of both input vectors, so the
    // valid range is [0, 2 * numElements).
    if (Result->getActiveBits() > 64 ||
        Result->getZExtValue() >= numElements * 2)
      return ExprError(Diag(TheCall->getBeginLoc(),
                            diag::err_shufflevector_argument_too_large)
                       << TheCall->getArg(i)->getSourceRange());
  }

  SmallVector<Expr*, 32> exprs;

  // Transfer ownership of all arguments out of the CallExpr and into the
  // ShuffleVectorExpr node that replaces it.
  for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
    exprs.push_back(TheCall->getArg(i));
    TheCall->setArg(i, nullptr);
  }

  return new (Context) ShuffleVectorExpr(Context, exprs, resType,
                                         TheCall->getCallee()->getBeginLoc(),
                                         TheCall->getRParenLoc());
}

/// SemaConvertVectorExpr - Handle __builtin_convertvector
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                       SourceLocation BuiltinLoc,
                                       SourceLocation RParenLoc) {
  ExprValueKind VK = VK_PRValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType DstTy = TInfo->getType();
  QualType SrcTy = E->getType();

  // Source and destination must both be vector types (unless dependent).
  if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector)
                     << E->getSourceRange());
  if (!DstTy->isVectorType() && !DstTy->isDependentType())
    return ExprError(Diag(BuiltinLoc,
                          diag::err_convertvector_non_vector_type));

  // The two vector types must have the same number of elements.
  if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
    unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
    unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
    if (SrcElts != DstElts)
      return ExprError(Diag(BuiltinLoc,
                            diag::err_convertvector_incompatible_vector)
                       << E->getSourceRange());
  }

  return new (Context)
      ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
}

/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
// This is declared to take (const void*, ...) and can take two
// optional constant int args.
7760bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { 7761 unsigned NumArgs = TheCall->getNumArgs(); 7762 7763 if (NumArgs > 3) 7764 return Diag(TheCall->getEndLoc(), 7765 diag::err_typecheck_call_too_many_args_at_most) 7766 << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); 7767 7768 // Argument 0 is checked for us and the remaining arguments must be 7769 // constant integers. 7770 for (unsigned i = 1; i != NumArgs; ++i) 7771 if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) 7772 return true; 7773 7774 return false; 7775} 7776 7777/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence. 7778bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) { 7779 if (!Context.getTargetInfo().checkArithmeticFenceSupported()) 7780 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported) 7781 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc()); 7782 if (checkArgCount(*this, TheCall, 1)) 7783 return true; 7784 Expr *Arg = TheCall->getArg(0); 7785 if (Arg->isInstantiationDependent()) 7786 return false; 7787 7788 QualType ArgTy = Arg->getType(); 7789 if (!ArgTy->hasFloatingRepresentation()) 7790 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector) 7791 << ArgTy; 7792 if (Arg->isLValue()) { 7793 ExprResult FirstArg = DefaultLvalueConversion(Arg); 7794 TheCall->setArg(0, FirstArg.get()); 7795 } 7796 TheCall->setType(TheCall->getArg(0)->getType()); 7797 return false; 7798} 7799 7800/// SemaBuiltinAssume - Handle __assume (MS Extension). 7801// __assume does not evaluate its arguments, and should warn if its argument 7802// has side effects. 
7803bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { 7804 Expr *Arg = TheCall->getArg(0); 7805 if (Arg->isInstantiationDependent()) return false; 7806 7807 if (Arg->HasSideEffects(Context)) 7808 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) 7809 << Arg->getSourceRange() 7810 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); 7811 7812 return false; 7813} 7814 7815/// Handle __builtin_alloca_with_align. This is declared 7816/// as (size_t, size_t) where the second size_t must be a power of 2 greater 7817/// than 8. 7818bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) { 7819 // The alignment must be a constant integer. 7820 Expr *Arg = TheCall->getArg(1); 7821 7822 // We can't check the value of a dependent argument. 7823 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { 7824 if (const auto *UE = 7825 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts())) 7826 if (UE->getKind() == UETT_AlignOf || 7827 UE->getKind() == UETT_PreferredAlignOf) 7828 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof) 7829 << Arg->getSourceRange(); 7830 7831 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context); 7832 7833 if (!Result.isPowerOf2()) 7834 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two) 7835 << Arg->getSourceRange(); 7836 7837 if (Result < Context.getCharWidth()) 7838 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small) 7839 << (unsigned)Context.getCharWidth() << Arg->getSourceRange(); 7840 7841 if (Result > std::numeric_limits<int32_t>::max()) 7842 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big) 7843 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange(); 7844 } 7845 7846 return false; 7847} 7848 7849/// Handle __builtin_assume_aligned. This is declared 7850/// as (const void*, size_t, ...) and can take one optional constant int arg. 
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
  if (checkArgCountRange(*this, TheCall, 2, 3))
    return true;

  unsigned NumArgs = TheCall->getNumArgs();
  Expr *FirstArg = TheCall->getArg(0);

  {
    // The pointer argument only undergoes the usual lvalue conversions.
    ExprResult FirstArgResult =
        DefaultFunctionArrayLvalueConversion(FirstArg);
    if (FirstArgResult.isInvalid())
      return true;
    TheCall->setArg(0, FirstArgResult.get());
  }

  // The alignment must be a constant integer.
  Expr *SecondArg = TheCall->getArg(1);

  // We can't check the value of a dependent argument.
  if (!SecondArg->isValueDependent()) {
    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, 1, Result))
      return true;

    if (!Result.isPowerOf2())
      return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
             << SecondArg->getSourceRange();

    // Alignments larger than the supported maximum only warn; they don't
    // reject the call.
    if (Result > Sema::MaximumAlignment)
      Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
          << SecondArg->getSourceRange() << Sema::MaximumAlignment;
  }

  // The optional third argument (offset) is converted to size_t.
  if (NumArgs > 2) {
    Expr *ThirdArg = TheCall->getArg(2);
    if (convertArgumentToType(*this, ThirdArg, Context.getSizeType()))
      return true;
    TheCall->setArg(2, ThirdArg);
  }

  return false;
}

bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
  unsigned BuiltinID =
      cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
  bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;

  // The size variant takes (format, ...); the formatting variant takes
  // (buffer, format, ...).
  unsigned NumArgs = TheCall->getNumArgs();
  unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
  if (NumArgs < NumRequiredArgs) {
    return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
           << 0 /* function call */ << NumRequiredArgs << NumArgs
           << TheCall->getSourceRange();
  }
  // At most 0xff data arguments are allowed on top of the required ones.
  if (NumArgs >= NumRequiredArgs + 0x100) {
    return Diag(TheCall->getEndLoc(),
                diag::err_typecheck_call_too_many_args_at_most)
           << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
           << TheCall->getSourceRange();
  }
  unsigned i = 0;

  // For formatting call, check buffer arg.
  if (!IsSizeCall) {
    ExprResult Arg(TheCall->getArg(i));
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, Context.VoidPtrTy, false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check string literal arg.
  unsigned FormatIdx = i;
  {
    ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
    if (Arg.isInvalid())
      return true;
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Make sure variadic args are scalar.
  unsigned FirstDataArg = i;
  while (i < NumArgs) {
    ExprResult Arg = DefaultVariadicArgumentPromotion(
        TheCall->getArg(i), VariadicFunction, nullptr);
    if (Arg.isInvalid())
      return true;
    CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
    // Each argument's size must fit in the 0xff limit checked here.
    if (ArgSize.getQuantity() >= 0x100) {
      return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
             << i << (int)ArgSize.getQuantity() << 0xff
             << TheCall->getSourceRange();
    }
    TheCall->setArg(i, Arg.get());
    i++;
  }

  // Check formatting specifiers. NOTE: We're only doing this for the non-size
  // call to avoid duplicate diagnostics.
  if (!IsSizeCall) {
    llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
    ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
    bool Success = CheckFormatArguments(
        Args, FAPK_Variadic, FormatIdx, FirstDataArg, FST_OSLog,
        VariadicFunction, TheCall->getBeginLoc(), SourceRange(),
        CheckedVarArgs);
    if (!Success)
      return true;
  }

  if (IsSizeCall) {
    TheCall->setType(Context.getSizeType());
  } else {
    TheCall->setType(Context.VoidPtrTy);
  }
  return false;
}

/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                                  llvm::APSInt &Result) {
  Expr *Arg = TheCall->getArg(ArgNum);
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // We can't check the value of a dependent argument.
  if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;

  std::optional<llvm::APSInt> R;
  if (!(R = Arg->getIntegerConstantExpr(Context)))
    return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
           << FDecl->getDeclName() << Arg->getSourceRange();
  Result = *R;
  return false;
}

/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression in the range [Low, High].
bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                       int Low, int High, bool RangeIsError) {
  if (isConstantEvaluated())
    return false;
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
    if (RangeIsError)
      return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
             << toString(Result, 10) << Low << High << Arg->getSourceRange();
    else
      // Defer the warning until we know if the code will be emitted so that
      // dead code can ignore this.
      DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
                          PDiag(diag::warn_argument_invalid_range)
                              << toString(Result, 10) << Low << High
                              << Arg->getSourceRange());
  }

  return false;
}

/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression that is a multiple of Num.
bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                          unsigned Num) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  if (Result.getSExtValue() % Num != 0)
    return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
           << Num << Arg->getSourceRange();

  return false;
}

/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
/// constant expression representing a power of 2.
bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
  // and only if x is a power of 2.
  if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
         << Arg->getSourceRange();
}

/// Returns true if \p Value is a non-negative byte value shifted left by a
/// multiple of 8 bits, e.g. 0x2a, 0x2a00, 0x2a0000.
static bool IsShiftedByte(llvm::APSInt Value) {
  if (Value.isNegative())
    return false;

  // Check if it's a shifted byte, by shifting it down
  while (true) {
    // If the value fits in the bottom byte, the check passes.
    if (Value < 0x100)
      return true;

    // Otherwise, if the value has _any_ bits in the bottom byte, the check
    // fails.
    if ((Value & 0xFF) != 0)
      return false;

    // If the bottom 8 bits are all 0, but something above that is nonzero,
    // then shifting the value right by 8 bits won't affect whether it's a
    // shifted byte or not. So do that, and go round again.
    Value >>= 8;
  }
}

/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
/// a constant expression representing an arbitrary byte value shifted left by
/// a multiple of 8 bits.
bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
                                             unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  if (IsShiftedByte(Result))
    return false;

  return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
         << Arg->getSourceRange();
}

/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
/// TheCall is a constant expression representing either a shifted byte value,
/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
/// Arm MVE intrinsics.
bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
                                                   int ArgNum,
                                                   unsigned ArgBits) {
  llvm::APSInt Result;

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check constant-ness first.
  if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
    return true;

  // Truncate to the given size.
  Result = Result.getLoBits(ArgBits);
  Result.setIsUnsigned(true);

  // Check to see if it's in either of the required forms.
  if (IsShiftedByte(Result) ||
      (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
    return false;

  return Diag(TheCall->getBeginLoc(),
              diag::err_argument_not_shifted_byte_or_xxff)
         << Arg->getSourceRange();
}

/// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions
bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
  // __builtin_arm_irg: (pointer, integer mask) -> tagged pointer.
  if (BuiltinID == AArch64::BI__builtin_arm_irg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    ExprResult SecArg = DefaultLvalueConversion(Arg1);
    if (SecArg.isInvalid())
      return true;
    QualType SecArgType = SecArg.get()->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_addg: (pointer, constant offset in [0,15]) -> pointer.
  if (BuiltinID == AArch64::BI__builtin_arm_addg) {
    if (checkArgCount(*this, TheCall, 2))
      return true;

    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    TheCall->setType(FirstArgType);

    // Second arg must be an constant in range [0,15]
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
  }

  // __builtin_arm_gmi: (pointer, integer mask) -> int.
  if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
    if (checkArgCount(*this, TheCall, 2))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    Expr *Arg1 = TheCall->getArg(1);

    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;
    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();

    QualType SecArgType = Arg1->getType();
    if (!SecArgType->isIntegerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
               << "second" << SecArgType << Arg1->getSourceRange();
    TheCall->setType(Context.IntTy);
    return false;
  }

  // __builtin_arm_ldg / __builtin_arm_stg: single pointer argument.
  if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
      BuiltinID == AArch64::BI__builtin_arm_stg) {
    if (checkArgCount(*this, TheCall, 1))
      return true;
    Expr *Arg0 = TheCall->getArg(0);
    ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(Arg0);
    if (FirstArg.isInvalid())
      return true;

    QualType FirstArgType = FirstArg.get()->getType();
    if (!FirstArgType->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
               << "first" << FirstArgType << Arg0->getSourceRange();
    TheCall->setArg(0, FirstArg.get());

    // Derive the return type from the pointer argument.
    if (BuiltinID == AArch64::BI__builtin_arm_ldg)
      TheCall->setType(FirstArgType);
    return false;
  }

  // __builtin_arm_subp: pointer difference; either operand may be null.
  if (BuiltinID == AArch64::BI__builtin_arm_subp) {
    Expr *ArgA = TheCall->getArg(0);
    Expr *ArgB = TheCall->getArg(1);

    ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(ArgA);
    ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(ArgB);

    if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
      return true;

    QualType ArgTypeA = ArgExprA.get()->getType();
    QualType ArgTypeB = ArgExprB.get()->getType();

    auto isNull = [&] (Expr *E) -> bool {
      return E->isNullPointerConstant(
          Context, Expr::NPC_ValueDependentIsNotNull); };

    // argument should be either a pointer or null
    if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
               << "first" << ArgTypeA << ArgA->getSourceRange();

    if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
               << "second" << ArgTypeB << ArgB->getSourceRange();

    // Ensure Pointee types are compatible
    if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
        ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
      QualType pointeeA = ArgTypeA->getPointeeType();
      QualType pointeeB = ArgTypeB->getPointeeType();
      if (!Context.typesAreCompatible(
              Context.getCanonicalType(pointeeA).getUnqualifiedType(),
              Context.getCanonicalType(pointeeB).getUnqualifiedType())) {
        return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
                 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
                 << ArgB->getSourceRange();
      }
    }

    // at least one argument should be pointer type
    if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
      return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
               << ArgTypeA << ArgTypeB << ArgA->getSourceRange();

    if (isNull(ArgA)) // adopt type of the other pointer
      ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer);

    if (isNull(ArgB))
      ArgExprB = ImpCastExprToType(ArgExprB.get(), ArgTypeA, CK_NullToPointer);

    TheCall->setArg(0, ArgExprA.get());
    TheCall->setArg(1, ArgExprB.get());
    TheCall->setType(Context.LongLongTy);
    return false;
  }
  assert(false && "Unhandled ARM MTE intrinsic");
  return true;
}

/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
/// TheCall is an ARM/AArch64 special register string literal.
bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                                    int ArgNum, unsigned ExpectedFieldNum,
                                    bool AllowName) {
  bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_wsr64 ||
                      BuiltinID == ARM::BI__builtin_arm_rsr ||
                      BuiltinID == ARM::BI__builtin_arm_rsrp ||
                      BuiltinID == ARM::BI__builtin_arm_wsr ||
                      BuiltinID == ARM::BI__builtin_arm_wsrp;
  bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
                          BuiltinID == AArch64::BI__builtin_arm_rsr ||
                          BuiltinID == AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == AArch64::BI__builtin_arm_wsr ||
                          BuiltinID == AArch64::BI__builtin_arm_wsrp;
  assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");

  // We can't check the value of a dependent argument.
  Expr *Arg = TheCall->getArg(ArgNum);
  if (Arg->isTypeDependent() || Arg->isValueDependent())
    return false;

  // Check if the argument is a string literal.
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
    return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
           << Arg->getSourceRange();

  // Check the type of special register given.
  StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString();
  SmallVector<StringRef, 6> Fields;
  Reg.split(Fields, ":");

  // The register must be spelled either as a single name (when allowed) or
  // with exactly the expected number of colon-separated fields.
  if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
    return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
           << Arg->getSourceRange();

  // If the string is the name of a register then we cannot check that it is
  // valid here but if the string is of one the forms described in ACLE then we
  // can check that the supplied fields are integers and within the valid
  // ranges.
  if (Fields.size() > 1) {
    bool FiveFields = Fields.size() == 5;

    bool ValidString = true;
    if (IsARMBuiltin) {
      // On ARM, strip the "cp"/"p" and "c" prefixes before the numeric checks
      // below.
      ValidString &= Fields[0].startswith_insensitive("cp") ||
                     Fields[0].startswith_insensitive("p");
      if (ValidString)
        Fields[0] = Fields[0].drop_front(
            Fields[0].startswith_insensitive("cp") ? 2 : 1);

      ValidString &= Fields[2].startswith_insensitive("c");
      if (ValidString)
        Fields[2] = Fields[2].drop_front(1);

      if (FiveFields) {
        ValidString &= Fields[3].startswith_insensitive("c");
        if (ValidString)
          Fields[3] = Fields[3].drop_front(1);
      }
    }

    // Upper bound for each numeric field.
    SmallVector<int, 5> Ranges;
    if (FiveFields)
      Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
    else
      Ranges.append({15, 7, 15});

    for (unsigned i=0; i<Fields.size(); ++i) {
      int IntField;
      ValidString &= !Fields[i].getAsInteger(10, IntField);
      ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
    }

    if (!ValidString)
      return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
             << Arg->getSourceRange();
  } else if (IsAArch64Builtin && Fields.size() == 1) {
    // This code validates writes to PSTATE registers.

    // Not a write.
    if (TheCall->getNumArgs() != 2)
      return false;

    // The 128-bit system register accesses do not touch PSTATE.
    if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == AArch64::BI__builtin_arm_wsr128)
      return false;

    // These are the named PSTATE accesses using "MSR (immediate)" instructions,
    // along with the upper limit on the immediates allowed.
    auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
      .CaseLower("spsel", 15)
      .CaseLower("daifclr", 15)
      .CaseLower("daifset", 15)
      .CaseLower("pan", 15)
      .CaseLower("uao", 15)
      .CaseLower("dit", 15)
      .CaseLower("ssbs", 15)
      .CaseLower("tco", 15)
      .CaseLower("allint", 1)
      .CaseLower("pm", 1)
      .Default(std::nullopt);

    // If this is not a named PSTATE, just continue without validating, as this
    // will be lowered to an "MSR (register)" instruction directly
    if (!MaxLimit)
      return false;

    // Here we only allow constants in the range for that pstate, as required by
    // the ACLE.
    //
    // While clang also accepts the names of system registers in its ACLE
    // intrinsics, we prevent this with the PSTATE names used in MSR (immediate)
    // as the value written via a register is different to the value used as an
    // immediate to have the same effect. e.g., for the instruction `msr tco,
    // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
    // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
    //
    // If a programmer wants to codegen the MSR (register) form of `msr tco,
    // xN`, they can still do so by specifying the register using five
    // colon-separated numbers in a string.
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, *MaxLimit);
  }

  return false;
}

/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
/// Emit an error and return true on failure; return false on success.
/// TypeStr is a string containing the type descriptor of the value returned by
/// the builtin and the descriptors of the expected type of the arguments.
bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
                                 const char *TypeStr) {

  assert((TypeStr[0] != '\0') &&
         "Invalid types in PPC MMA builtin declaration");

  // First enforce the target-feature requirements for this builtin.
  switch (BuiltinID) {
  default:
    // This function is called in CheckPPCBuiltinFunctionCall where the
    // BuiltinID is guaranteed to be an MMA or pair vector memop builtin, here
    // we are isolating the pair vector memop builtins that can be used with mma
    // off so the default case is every builtin that requires mma and paired
    // vector memops.
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10") ||
        SemaFeatureCheck(*this, TheCall, "mma",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  case PPC::BI__builtin_vsx_lxvp:
  case PPC::BI__builtin_vsx_stxvp:
  case PPC::BI__builtin_vsx_assemble_pair:
  case PPC::BI__builtin_vsx_disassemble_pair:
    // These pair vector memops only require paired-vector-memops, not mma.
    if (SemaFeatureCheck(*this, TheCall, "paired-vector-memops",
                         diag::err_ppc_builtin_only_on_arch, "10"))
      return true;
    break;
  }

  unsigned Mask = 0;
  unsigned ArgNum = 0;

  // The first type in TypeStr is the type of the value returned by the
  // builtin. So we first read that type and change the type of TheCall.
  // NOTE: the loops below rely on DecodePPCMMATypeFromStr advancing TypeStr
  // past each decoded descriptor — presumably it takes TypeStr by reference;
  // confirm against its definition.
  QualType type = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
  TheCall->setType(type);

  // Check each call argument against the corresponding descriptor in TypeStr.
  while (*TypeStr != '\0') {
    Mask = 0;
    QualType ExpectedType = DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    if (ArgNum >= TheCall->getNumArgs()) {
      // Too few arguments; count the remaining descriptors below so
      // checkArgCount can report the expected total.
      ArgNum++;
      break;
    }

    Expr *Arg = TheCall->getArg(ArgNum);
    QualType PassedType = Arg->getType();
    QualType StrippedRVType = PassedType.getCanonicalType();

    // Strip Restrict/Volatile qualifiers.
    // NOTE(review): getUnqualifiedType() removes *all* qualifiers (including
    // const) whenever restrict or volatile is present — confirm this is
    // intended rather than stripping only restrict/volatile.
    if (StrippedRVType.isRestrictQualified() ||
        StrippedRVType.isVolatileQualified())
      StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();

    // The only case where the argument type and expected type are allowed to
    // mismatch is if the argument type is a non-void pointer (or array) and
    // expected type is a void pointer.
    if (StrippedRVType != ExpectedType)
      if (!(ExpectedType->isVoidPointerType() &&
            (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
        return Diag(Arg->getBeginLoc(),
                    diag::err_typecheck_convert_incompatible)
               << PassedType << ExpectedType << 1 << 0 << 0;

    // If the value of the Mask is not 0, we have a constraint in the size of
    // the integer argument so here we ensure the argument is a constant that
    // is in the valid range.
    if (Mask != 0 &&
        SemaBuiltinConstantArgRange(TheCall, ArgNum, 0, Mask, true))
      return true;

    ArgNum++;
  }

  // In case we exited early from the previous loop, there are other types to
  // read from TypeStr. So we need to read them all to ensure we have the right
  // number of arguments in TheCall and if it is not the case, to display a
  // better error message.
  while (*TypeStr != '\0') {
    (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask);
    ArgNum++;
  }
  if (checkArgCount(*this, TheCall, ArgNum))
    return true;

  return false;
}

/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
/// This checks that the target supports __builtin_longjmp and
/// that val is a constant 1.
bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
  // Reject outright on targets without setjmp/longjmp lowering.
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());

  Expr *Arg = TheCall->getArg(1);
  llvm::APSInt Result;

  // TODO: This is less than ideal. Overload this to take a value.
  if (SemaBuiltinConstantArg(TheCall, 1, Result))
    return true;

  // The second argument must be the constant 1.
  if (Result != 1)
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
           << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());

  return false;
}

/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
/// This checks that the target supports __builtin_setjmp.
bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
  // Only the target-support check is needed; the env argument is handled by
  // ordinary type checking.
  if (!Context.getTargetInfo().hasSjLjLowering())
    return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
           << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
  return false;
}

namespace {

// Tracks, across all candidate format strings for one call, the highest data
// argument index not consumed by any format string, together with every
// format-string expression that leaves exactly that argument uncovered.
class UncoveredArgHandler {
  enum { Unknown = -1, AllCovered = -2 };

  signed FirstUncoveredArg = Unknown;
  SmallVector<const Expr *, 4> DiagnosticExprs;

public:
  UncoveredArgHandler() = default;

  // True only when some argument is known to be uncovered (not in the
  // Unknown or AllCovered states).
  bool hasUncoveredArg() const {
    return (FirstUncoveredArg >= 0);
  }

  unsigned getUncoveredArg() const {
    assert(hasUncoveredArg() && "no uncovered argument");
    return FirstUncoveredArg;
  }

  void setAllCovered() {
    // A string has been found with all arguments covered, so clear out
    // the diagnostics.
    DiagnosticExprs.clear();
    FirstUncoveredArg = AllCovered;
  }

  void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
    assert(NewFirstUncoveredArg >= 0 && "Outside range");

    // Don't update if a previous string covers all arguments.
    if (FirstUncoveredArg == AllCovered)
      return;

    // UncoveredArgHandler tracks the highest uncovered argument index
    // and with it all the strings that match this index.
    if (NewFirstUncoveredArg == FirstUncoveredArg)
      DiagnosticExprs.push_back(StrExpr);
    else if (NewFirstUncoveredArg > FirstUncoveredArg) {
      DiagnosticExprs.clear();
      DiagnosticExprs.push_back(StrExpr);
      FirstUncoveredArg = NewFirstUncoveredArg;
    }
  }

  // Emits the accumulated diagnostics; defined later in this file.
  void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
};

// Result of classifying a candidate format-string expression, ordered from
// weakest to strongest guarantee.
enum StringLiteralCheckType {
  SLCT_NotALiteral,
  SLCT_UncheckedLiteral,
  SLCT_CheckedLiteral
};

} // namespace

// Accumulate Addend into Offset (add, or subtract when BinOpKind is BO_Sub
// with the addend on the right), using signed arithmetic that widens and
// retries on overflow so the final offset is never silently wrapped.
static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
                       BinaryOperatorKind BinOpKind,
                       bool AddendIsRight) {
  unsigned BitWidth = Offset.getBitWidth();
  unsigned AddendBitWidth = Addend.getBitWidth();
  // There might be negative interim results.
  if (Addend.isUnsigned()) {
    Addend = Addend.zext(++AddendBitWidth);
    Addend.setIsSigned(true);
  }
  // Adjust the bit width of the APSInts.
  if (AddendBitWidth > BitWidth) {
    Offset = Offset.sext(AddendBitWidth);
    BitWidth = AddendBitWidth;
  } else if (BitWidth > AddendBitWidth) {
    Addend = Addend.sext(BitWidth);
  }

  bool Ov = false;
  llvm::APSInt ResOffset = Offset;
  if (BinOpKind == BO_Add)
    ResOffset = Offset.sadd_ov(Addend, Ov);
  else {
    assert(AddendIsRight && BinOpKind == BO_Sub &&
           "operator must be add or sub with addend on the right");
    ResOffset = Offset.ssub_ov(Addend, Ov);
  }

  // We add an offset to a pointer here so we should support an offset as big as
  // possible.
  if (Ov) {
    assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
           "index (intermediate) result too big");
    // Double the width and recurse; guaranteed to terminate because the
    // widened operation cannot overflow again at this magnitude.
    Offset = Offset.sext(2 * BitWidth);
    sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
    return;
  }

  Offset = ResOffset;
}

namespace {

// This is a wrapper class around StringLiteral to support offsetted string
// literals as format strings. It takes the offset into account when returning
// the string and its length or the source locations to display notes correctly.
// NOTE: Offset appears to be counted in characters (code units), not bytes —
// getByteLength() scales it by getCharByteWidth(); confirm at call sites.
class FormatStringLiteral {
  const StringLiteral *FExpr;
  int64_t Offset;

 public:
  FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
      : FExpr(fexpr), Offset(Offset) {}

  StringRef getString() const {
    return FExpr->getString().drop_front(Offset);
  }

  unsigned getByteLength() const {
    return FExpr->getByteLength() - getCharByteWidth() * Offset;
  }

  unsigned getLength() const { return FExpr->getLength() - Offset; }
  unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }

  StringLiteral::StringKind getKind() const { return FExpr->getKind(); }

  QualType getType() const { return FExpr->getType(); }

  bool isAscii() const { return FExpr->isOrdinary(); }
  bool isWide() const { return FExpr->isWide(); }
  bool isUTF8() const { return FExpr->isUTF8(); }
  bool isUTF16() const { return FExpr->isUTF16(); }
  bool isUTF32() const { return FExpr->isUTF32(); }
  bool isPascal() const { return FExpr->isPascal(); }

  // Translates a byte offset within the offsetted view back to a source
  // location in the underlying literal.
  SourceLocation getLocationOfByte(
      unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
      const TargetInfo &Target, unsigned *StartToken = nullptr,
      unsigned *StartTokenByteOffset = nullptr) const {
    return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
                                    StartToken, StartTokenByteOffset);
  }

  SourceLocation getBeginLoc() const LLVM_READONLY {
    return FExpr->getBeginLoc().getLocWithOffset(Offset);
  }

  SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
};

} // namespace

static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers);

static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E);

// Determine if an expression is a string literal or constant string.
// If this function returns false on the arguments to a function expecting a
// format string, we will usually need to emit a warning.
// True string literals are then checked by CheckFormatString.
//
// Offset accumulates any constant pointer arithmetic applied to the format
// expression (e.g. fmt + 2, &fmt[2]) so the literal can be checked from the
// right position. The function walks the expression recursively (and
// iteratively via the tryAgain label for simple wrappers).
static StringLiteralCheckType
checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
                      Sema::FormatArgumentPassingKind APK, unsigned format_idx,
                      unsigned firstDataArg, Sema::FormatStringType Type,
                      Sema::VariadicCallType CallType, bool InFunctionCall,
                      llvm::SmallBitVector &CheckedVarArgs,
                      UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
                      bool IgnoreStringsWithoutSpecifiers = false) {
  if (S.isConstantEvaluated())
    return SLCT_NotALiteral;
tryAgain:
  assert(Offset.isSigned() && "invalid offset");

  if (E->isTypeDependent() || E->isValueDependent())
    return SLCT_NotALiteral;

  E = E->IgnoreParenCasts();

  if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
    // Technically -Wformat-nonliteral does not warn about this case.
    // The behavior of printf and friends in this case is implementation
    // dependent. Ideally if the format string cannot be null then
    // it should have a 'nonnull' attribute in the function prototype.
    return SLCT_UncheckedLiteral;

  switch (E->getStmtClass()) {
  case Stmt::InitListExprClass:
    // Handle expressions like {"foobar"}.
    if (const clang::Expr *SLE = maybeConstEvalStringLiteral(S.Context, E)) {
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    }
    return SLCT_NotALiteral;
  case Stmt::BinaryConditionalOperatorClass:
  case Stmt::ConditionalOperatorClass: {
    // The expression is a literal if both sub-expressions were, and it was
    // completely checked only if both sub-expressions were checked.
    const AbstractConditionalOperator *C =
        cast<AbstractConditionalOperator>(E);

    // Determine whether it is necessary to check both sub-expressions, for
    // example, because the condition expression is a constant that can be
    // evaluated at compile time.
    bool CheckLeft = true, CheckRight = true;

    bool Cond;
    if (C->getCond()->EvaluateAsBooleanCondition(Cond, S.getASTContext(),
                                                 S.isConstantEvaluated())) {
      if (Cond)
        CheckRight = false;
      else
        CheckLeft = false;
    }

    // We need to maintain the offsets for the right and the left hand side
    // separately to check if every possible indexed expression is a valid
    // string literal. They might have different offsets for different string
    // literals in the end.
    StringLiteralCheckType Left;
    if (!CheckLeft)
      Left = SLCT_UncheckedLiteral;
    else {
      Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, APK, format_idx,
                                   firstDataArg, Type, CallType, InFunctionCall,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
      if (Left == SLCT_NotALiteral || !CheckRight) {
        return Left;
      }
    }

    StringLiteralCheckType Right = checkFormatStringExpr(
        S, C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
        CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
        IgnoreStringsWithoutSpecifiers);

    // Combine: the weaker (smaller) of the two results wins when both sides
    // were checked.
    return (CheckLeft && Left < Right) ? Left : Right;
  }

  case Stmt::ImplicitCastExprClass:
    E = cast<ImplicitCastExpr>(E)->getSubExpr();
    goto tryAgain;

  case Stmt::OpaqueValueExprClass:
    if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
      E = src;
      goto tryAgain;
    }
    return SLCT_NotALiteral;

  case Stmt::PredefinedExprClass:
    // While __func__, etc., are technically not string literals, they
    // cannot contain format specifiers and thus are not a security
    // liability.
    return SLCT_UncheckedLiteral;

  case Stmt::DeclRefExprClass: {
    const DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // As an exception, do not flag errors for variables binding to
    // const string literals.
    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
      bool isConstant = false;
      QualType T = DR->getType();

      if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
        isConstant = AT->getElementType().isConstant(S.Context);
      } else if (const PointerType *PT = T->getAs<PointerType>()) {
        isConstant = T.isConstant(S.Context) &&
                     PT->getPointeeType().isConstant(S.Context);
      } else if (T->isObjCObjectPointerType()) {
        // In ObjC, there is usually no "const ObjectPointer" type,
        // so don't check if the pointee type is constant.
        isConstant = T.isConstant(S.Context);
      }

      if (isConstant) {
        if (const Expr *Init = VD->getAnyInitializer()) {
          // Look through initializers like const char c[] = { "foo" }
          if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
            if (InitList->isStringLiteralInit())
              Init = InitList->getInit(0)->IgnoreParenImpCasts();
          }
          return checkFormatStringExpr(
              S, Init, Args, APK, format_idx, firstDataArg, Type, CallType,
              /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
        }
      }

      // When the format argument is an argument of this function, and this
      // function also has the format attribute, there are several interactions
      // for which there shouldn't be a warning. For instance, when calling
      // v*printf from a function that has the printf format attribute, we
      // should not emit a warning about using `fmt`, even though it's not
      // constant, because the arguments have already been checked for the
      // caller of `logmessage`:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logmessage(char const *fmt, ...) {
      //    va_list ap;
      //    va_start(ap, fmt);
      //    vprintf(fmt, ap);  /* do not emit a warning about "fmt" */
      //    ...
      // }
      //
      // Another interaction that we need to support is calling a variadic
      // format function from a format function that has fixed arguments. For
      // instance:
      //
      //  __attribute__((format(printf, 1, 2)))
      //  void logstring(char const *fmt, char const *str) {
      //    printf(fmt, str);  /* do not emit a warning about "fmt" */
      //  }
      //
      // Same (and perhaps more relatably) for the variadic template case:
      //
      //  template<typename... Args>
      //  __attribute__((format(printf, 1, 2)))
      //  void log(const char *fmt, Args&&... args) {
      //    printf(fmt, forward<Args>(args)...);
      //    /* do not emit a warning about "fmt" */
      //  }
      //
      // Due to implementation difficulty, we only check the format, not the
      // format arguments, in all cases.
      //
      if (const auto *PV = dyn_cast<ParmVarDecl>(VD)) {
        if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
          for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
            bool IsCXXMember = false;
            if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
              IsCXXMember = MD->isInstance();

            bool IsVariadic = false;
            if (const FunctionType *FnTy = D->getFunctionType())
              IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
            else if (const auto *BD = dyn_cast<BlockDecl>(D))
              IsVariadic = BD->isVariadic();
            else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
              IsVariadic = OMD->isVariadic();

            Sema::FormatStringInfo CallerFSI;
            if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
                                          &CallerFSI)) {
              // We also check if the formats are compatible.
              // We can't pass a 'scanf' string to a 'printf' function.
              if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
                  Type == S.GetFormatStringType(PVFormat)) {
                // Lastly, check that argument passing kinds transition in a
                // way that makes sense:
                // from a caller with FAPK_VAList, allow FAPK_VAList
                // from a caller with FAPK_Fixed, allow FAPK_Fixed
                // from a caller with FAPK_Fixed, allow FAPK_Variadic
                // from a caller with FAPK_Variadic, allow FAPK_VAList
                switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
                case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
                case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
                case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
                  return SLCT_UncheckedLiteral;
                }
              }
            }
          }
        }
      }
    }

    return SLCT_NotALiteral;
  }

  case Stmt::CallExprClass:
  case Stmt::CXXMemberCallExprClass: {
    const CallExpr *CE = cast<CallExpr>(E);
    if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
      bool IsFirst = true;
      StringLiteralCheckType CommonResult;
      // NOTE(review): only the first format_arg attribute's result is kept;
      // results from subsequent format_arg attributes are computed (for their
      // diagnostics) but do not affect CommonResult.
      for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
        const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
        StringLiteralCheckType Result = checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
        if (IsFirst) {
          CommonResult = Result;
          IsFirst = false;
        }
      }
      if (!IsFirst)
        return CommonResult;

      if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
        unsigned BuiltinID = FD->getBuiltinID();
        if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
            BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
          // These builtins wrap a literal; check the wrapped argument.
          const Expr *Arg = CE->getArg(0);
          return checkFormatStringExpr(
              S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
              InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
              IgnoreStringsWithoutSpecifiers);
        }
      }
    }
    // Fall back to constant evaluation of the whole call (e.g. constexpr
    // functions returning a literal).
    if (const Expr *SLE = maybeConstEvalStringLiteral(S.Context, E))
      return checkFormatStringExpr(S, SLE, Args, APK, format_idx, firstDataArg,
                                   Type, CallType, /*InFunctionCall*/ false,
                                   CheckedVarArgs, UncoveredArg, Offset,
                                   IgnoreStringsWithoutSpecifiers);
    return SLCT_NotALiteral;
  }
  case Stmt::ObjCMessageExprClass: {
    const auto *ME = cast<ObjCMessageExpr>(E);
    if (const auto *MD = ME->getMethodDecl()) {
      if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
        // As a special case heuristic, if we're using the method -[NSBundle
        // localizedStringForKey:value:table:], ignore any key strings that lack
        // format specifiers. The idea is that if the key doesn't have any
        // format specifiers then its probably just a key to map to the
        // localized strings. If it does have format specifiers though, then its
        // likely that the text of the key is the format string in the
        // programmer's language, and should be checked.
        const ObjCInterfaceDecl *IFace;
        if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
            IFace->getIdentifier()->isStr("NSBundle") &&
            MD->getSelector().isKeywordSelector(
                {"localizedStringForKey", "value", "table"})) {
          IgnoreStringsWithoutSpecifiers = true;
        }

        const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
        return checkFormatStringExpr(
            S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
            InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
            IgnoreStringsWithoutSpecifiers);
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::ObjCStringLiteralClass:
  case Stmt::StringLiteralClass: {
    // An actual literal: run the real format-string check on it, applying
    // any accumulated offset.
    const StringLiteral *StrE = nullptr;

    if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
      StrE = ObjCFExpr->getString();
    else
      StrE = cast<StringLiteral>(E);

    if (StrE) {
      if (Offset.isNegative() || Offset > StrE->getLength()) {
        // TODO: It would be better to have an explicit warning for out of
        // bounds literals.
        return SLCT_NotALiteral;
      }
      FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
      CheckFormatString(S, &FStr, E, Args, APK, format_idx, firstDataArg, Type,
                        InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
                        IgnoreStringsWithoutSpecifiers);
      return SLCT_CheckedLiteral;
    }

    return SLCT_NotALiteral;
  }
  case Stmt::BinaryOperatorClass: {
    const BinaryOperator *BinOp = cast<BinaryOperator>(E);

    // A string literal + an int offset is still a string literal.
    if (BinOp->isAdditiveOp()) {
      Expr::EvalResult LResult, RResult;

      bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
          LResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());
      bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
          RResult, S.Context, Expr::SE_NoSideEffects, S.isConstantEvaluated());

      // Exactly one side must be a constant integer; the other side is the
      // (candidate) string. "int - string" is not a valid pointer expression,
      // hence the BO_Add restriction when the integer is on the left.
      if (LIsInt != RIsInt) {
        BinaryOperatorKind BinOpKind = BinOp->getOpcode();

        if (LIsInt) {
          if (BinOpKind == BO_Add) {
            sumOffsets(Offset, LResult.Val.getInt(), BinOpKind, RIsInt);
            E = BinOp->getRHS();
            goto tryAgain;
          }
        } else {
          sumOffsets(Offset, RResult.Val.getInt(), BinOpKind, RIsInt);
          E = BinOp->getLHS();
          goto tryAgain;
        }
      }
    }

    return SLCT_NotALiteral;
  }
  case Stmt::UnaryOperatorClass: {
    // Handle &fmt[i] as fmt + i.
    const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
    auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
    if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
      Expr::EvalResult IndexResult;
      if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context,
                                       Expr::SE_NoSideEffects,
                                       S.isConstantEvaluated())) {
        sumOffsets(Offset, IndexResult.Val.getInt(), BO_Add,
                   /*RHS is int*/ true);
        E = ASE->getBase();
        goto tryAgain;
      }
    }

    return SLCT_NotALiteral;
  }

  default:
    return SLCT_NotALiteral;
  }
}

// If this expression can be evaluated at compile-time,
// check if the result is a StringLiteral and return it
// otherwise return nullptr
static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
                                               const Expr *E) {
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, Context) && Result.Val.isLValue()) {
    const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
    if (isa_and_nonnull<StringLiteral>(LVE))
      return LVE;
  }
  return nullptr;
}

Sema::FormatStringType Sema::GetFormatStringType(const
                                                FormatAttr *Format) {
  // Maps the attribute's archetype name to the internal FormatStringType.
  // NOTE(review): "syslog" maps to FST_Printf here, yet CheckFormatArguments
  // below has a distinct FST_Syslog case — confirm FST_Syslog is reachable.
  return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
      .Case("scanf", FST_Scanf)
      .Cases("printf", "printf0", "syslog", FST_Printf)
      .Cases("NSString", "CFString", FST_NSString)
      .Case("strftime", FST_Strftime)
      .Case("strfmon", FST_Strfmon)
      .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
      .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
      .Case("os_trace", FST_OSLog)
      .Case("os_log", FST_OSLog)
      .Default(FST_Unknown);
}

/// CheckFormatArguments - Check calls to printf and scanf (and similar
/// functions) for correct use of format strings.
/// Returns true if a format string has been fully checked.
bool Sema::CheckFormatArguments(const FormatAttr *Format,
                                ArrayRef<const Expr *> Args, bool IsCXXMember,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // Translate the attribute into indices/kinds, then delegate to the
  // index-based overload below.
  FormatStringInfo FSI;
  if (getFormatStringInfo(Format, IsCXXMember, CallType != VariadicDoesNotApply,
                          &FSI))
    return CheckFormatArguments(Args, FSI.ArgPassingKind, FSI.FormatIdx,
                                FSI.FirstDataArg, GetFormatStringType(Format),
                                CallType, Loc, Range, CheckedVarArgs);
  return false;
}

bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
                                Sema::FormatArgumentPassingKind APK,
                                unsigned format_idx, unsigned firstDataArg,
                                FormatStringType Type,
                                VariadicCallType CallType, SourceLocation Loc,
                                SourceRange Range,
                                llvm::SmallBitVector &CheckedVarArgs) {
  // CHECK: printf/scanf-like function is called with no format string.
  if (format_idx >= Args.size()) {
    Diag(Loc, diag::warn_missing_format_string) << Range;
    return false;
  }

  const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();

  // CHECK: format string is not a string literal.
  //
  // Dynamically generated format strings are difficult to
  // automatically vet at compile time. Requiring that format strings
  // are string literals: (1) permits the checking of format strings by
  // the compiler and thereby (2) can practically remove the source of
  // many format string exploits.

  // Format string can be either ObjC string (e.g. @"%d") or
  // C string (e.g. "%d")
  // ObjC string uses the same format specifiers as C string, so we can use
  // the same format string checking logic for both ObjC and C strings.
  UncoveredArgHandler UncoveredArg;
  StringLiteralCheckType CT = checkFormatStringExpr(
      *this, OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
      CallType,
      /*IsFunctionCall*/ true, CheckedVarArgs, UncoveredArg,
      /*no string offset*/ llvm::APSInt(64, false) = 0);

  // Generate a diagnostic where an uncovered argument is detected.
  if (UncoveredArg.hasUncoveredArg()) {
    unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
    assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
    UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]);
  }

  if (CT != SLCT_NotALiteral)
    // Literal format string found, check done!
    return CT == SLCT_CheckedLiteral;

  // Strftime is particular as it always uses a single 'time' argument,
  // so it is safe to pass a non-literal string.
  if (Type == FST_Strftime)
    return false;

  // Do not emit diag when the string param is a macro expansion and the
  // format is either NSString or CFString. This is a hack to prevent
  // diag when using the NSLocalizedString and CFCopyLocalizedString macros
  // which are usually used in place of NS and CF string literals.
  SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
  if (Type == FST_NSString && SourceMgr.isInSystemMacro(FormatLoc))
    return false;

  // If there are no arguments specified, warn with -Wformat-security, otherwise
  // warn only with -Wformat-nonliteral.
  if (Args.size() == firstDataArg) {
    Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
        << OrigFormatExpr->getSourceRange();
    // Suggest prefixing with a harmless "%s" format for the printf-like
    // families where that is a valid fix.
    switch (Type) {
    default:
      break;
    case FST_Kprintf:
    case FST_FreeBSDKPrintf:
    case FST_Printf:
    case FST_Syslog:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
      break;
    case FST_NSString:
      Diag(FormatLoc, diag::note_format_security_fixit)
          << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
      break;
    }
  } else {
    Diag(FormatLoc, diag::warn_format_nonliteral)
        << OrigFormatExpr->getSourceRange();
  }
  return false;
}

namespace {

// Base handler for walking a format string: receives callbacks from
// analyze_format_string and turns them into diagnostics, while tracking
// which data arguments the string covers.
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
  Sema &S;
  const FormatStringLiteral *FExpr;
  const Expr *OrigFormatExpr;
  const Sema::FormatStringType FSType;
  const unsigned FirstDataArg;
  const unsigned NumDataArgs;
  const char *Beg; // Start of format string.
9225 const Sema::FormatArgumentPassingKind ArgPassingKind; 9226 ArrayRef<const Expr *> Args; 9227 unsigned FormatIdx; 9228 llvm::SmallBitVector CoveredArgs; 9229 bool usesPositionalArgs = false; 9230 bool atFirstArg = true; 9231 bool inFunctionCall; 9232 Sema::VariadicCallType CallType; 9233 llvm::SmallBitVector &CheckedVarArgs; 9234 UncoveredArgHandler &UncoveredArg; 9235 9236public: 9237 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr, 9238 const Expr *origFormatExpr, 9239 const Sema::FormatStringType type, unsigned firstDataArg, 9240 unsigned numDataArgs, const char *beg, 9241 Sema::FormatArgumentPassingKind APK, 9242 ArrayRef<const Expr *> Args, unsigned formatIdx, 9243 bool inFunctionCall, Sema::VariadicCallType callType, 9244 llvm::SmallBitVector &CheckedVarArgs, 9245 UncoveredArgHandler &UncoveredArg) 9246 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type), 9247 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), 9248 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx), 9249 inFunctionCall(inFunctionCall), CallType(callType), 9250 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) { 9251 CoveredArgs.resize(numDataArgs); 9252 CoveredArgs.reset(); 9253 } 9254 9255 void DoneProcessing(); 9256 9257 void HandleIncompleteSpecifier(const char *startSpecifier, 9258 unsigned specifierLen) override; 9259 9260 void HandleInvalidLengthModifier( 9261 const analyze_format_string::FormatSpecifier &FS, 9262 const analyze_format_string::ConversionSpecifier &CS, 9263 const char *startSpecifier, unsigned specifierLen, 9264 unsigned DiagID); 9265 9266 void HandleNonStandardLengthModifier( 9267 const analyze_format_string::FormatSpecifier &FS, 9268 const char *startSpecifier, unsigned specifierLen); 9269 9270 void HandleNonStandardConversionSpecifier( 9271 const analyze_format_string::ConversionSpecifier &CS, 9272 const char *startSpecifier, unsigned specifierLen); 9273 9274 void HandlePosition(const char *startPos, unsigned 
posLen) override; 9275 9276 void HandleInvalidPosition(const char *startSpecifier, 9277 unsigned specifierLen, 9278 analyze_format_string::PositionContext p) override; 9279 9280 void HandleZeroPosition(const char *startPos, unsigned posLen) override; 9281 9282 void HandleNullChar(const char *nullCharacter) override; 9283 9284 template <typename Range> 9285 static void 9286 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, 9287 const PartialDiagnostic &PDiag, SourceLocation StringLoc, 9288 bool IsStringLocation, Range StringRange, 9289 ArrayRef<FixItHint> Fixit = std::nullopt); 9290 9291protected: 9292 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, 9293 const char *startSpec, 9294 unsigned specifierLen, 9295 const char *csStart, unsigned csLen); 9296 9297 void HandlePositionalNonpositionalArgs(SourceLocation Loc, 9298 const char *startSpec, 9299 unsigned specifierLen); 9300 9301 SourceRange getFormatStringRange(); 9302 CharSourceRange getSpecifierRange(const char *startSpecifier, 9303 unsigned specifierLen); 9304 SourceLocation getLocationOfByte(const char *x); 9305 9306 const Expr *getDataArg(unsigned i) const; 9307 9308 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, 9309 const analyze_format_string::ConversionSpecifier &CS, 9310 const char *startSpecifier, unsigned specifierLen, 9311 unsigned argIndex); 9312 9313 template <typename Range> 9314 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, 9315 bool IsStringLocation, Range StringRange, 9316 ArrayRef<FixItHint> Fixit = std::nullopt); 9317}; 9318 9319} // namespace 9320 9321SourceRange CheckFormatHandler::getFormatStringRange() { 9322 return OrigFormatExpr->getSourceRange(); 9323} 9324 9325CharSourceRange CheckFormatHandler:: 9326getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { 9327 SourceLocation Start = getLocationOfByte(startSpecifier); 9328 SourceLocation End = 
      getLocationOfByte(startSpecifier + specifierLen - 1);

  // Advance the end SourceLocation by one due to half-open ranges.
  End = End.getLocWithOffset(1);

  return CharSourceRange::getCharRange(Start, End);
}

/// Map a pointer into the format string's character data back to a source
/// location, delegating to the literal so escapes/concatenation are handled.
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
  return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
                                  S.getLangOpts(), S.Context.getTargetInfo());
}

/// Diagnose a specifier that ends before a conversion character is seen
/// (e.g. a trailing '%').
void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
                                                   unsigned specifierLen){
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
                       getLocationOfByte(startSpecifier),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen));
}

/// Diagnose a length modifier that does not fit its conversion specifier,
/// attaching a replacement fix-it when a corrected modifier is known and a
/// removal fix-it when the modifier is simply nonsensical.
void CheckFormatHandler::HandleInvalidLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Follow up with a note carrying the suggested replacement.
    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    FixItHint Hint;
    if (DiagID == diag::warn_format_nonsensical_length)
      Hint = FixItHint::CreateRemoval(LMRange);

    EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(),
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen),
                         Hint);
  }
}

/// Diagnose a length modifier that is valid but not part of the relevant
/// standard, suggesting the standard replacement when one is known.
void CheckFormatHandler::HandleNonStandardLengthModifier(
    const analyze_format_string::FormatSpecifier &FS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  const LengthModifier &LM = FS.getLengthModifier();
  CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength());

  // See if we know how to fix this length modifier.
  std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
  if (FixedLM) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
      << FixedLM->toString()
      << FixItHint::CreateReplacement(LMRange, FixedLM->toString());

  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << LM.toString() << 0,
                         getLocationOfByte(LM.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

/// Diagnose a conversion specifier that is not part of the relevant standard,
/// suggesting the standard replacement when one is known.
void CheckFormatHandler::HandleNonStandardConversionSpecifier(
    const analyze_format_string::ConversionSpecifier &CS,
    const char *startSpecifier, unsigned specifierLen) {
  using namespace analyze_format_string;

  // See if we know how to fix this conversion specifier.
  std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    // Follow up with a note carrying the suggested replacement.
    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                           << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

/// Diagnose use of a positional specifier ('%1$') as non-standard.
void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

/// Diagnose a malformed positional specifier; \p p says where in the
/// specifier the position appeared.
void
CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
                                          analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
                         << (unsigned) p,
                       getLocationOfByte(startPos), /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

/// Diagnose a '%0$' positional specifier; positions are 1-based.
void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                       getLocationOfByte(startPos),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

/// Diagnose an embedded NUL byte in a (non-ObjC) format string.
void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
        S.PDiag(diag::warn_printf_format_string_contains_null_char),
        getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
        getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

/// Called once the whole format string has been walked; records the first
/// data argument (if any) that no specifier consumed so the caller can
/// diagnose it.
void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (ArgPassingKind != Sema::FAPK_VAList) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      UncoveredArg.Update(notCoveredArg, OrigFormatExpr);
    } else {
      UncoveredArg.setAllCovered();
    }
  }
}

/// Emit the deferred "data argument not used" warning, pointing at every
/// format string that failed to cover the argument.
void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
                                   const Expr *ArgExpr) {
  assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 &&
         "Invalid state");

  if (!ArgExpr)
    return;

  SourceLocation Loc = ArgExpr->getBeginLoc();

  // Stay quiet inside system macros, as elsewhere in format checking.
  if (S.getSourceManager().isInSystemMacro(Loc))
    return;

  PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
  for (auto E : DiagnosticExprs)
    PDiag << E->getSourceRange();

  CheckFormatHandler::EmitFormatDiagnostic(
                                  S, IsFunctionCall, DiagnosticExprs[0],
                                  PDiag, Loc, /*IsStringLocation*/false,
                                  DiagnosticExprs[0]->getSourceRange());
}

/// Diagnose a conversion character that is not a valid specifier. Returns
/// false when processing of the rest of the format string should stop.
bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  }
  else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue processing
    // the format string after this point, however, as we will likely just get
    // gibberish when trying to match arguments.
    keepGoing = false;
  }

  StringRef Specifier(csStart, csLen);

  // If the specifier is non-printable, it could be the first byte of a UTF-8
  // sequence. In that case, print the UTF-8 code point. If not, print the byte
  // hex value.
  std::string CodePointStr;
  if (!llvm::sys::locale::isPrint(*csStart)) {
    llvm::UTF32 CodePoint;
    const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
    const llvm::UTF8 *E =
        reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
    llvm::ConversionResult Result =
        llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);

    if (Result != llvm::conversionOK) {
      // Not a valid UTF-8 sequence: fall back to the raw first byte.
      unsigned char FirstChar = *csStart;
      CodePoint = (llvm::UTF32)FirstChar;
    }

    // Render as \xNN, \uNNNN or \UNNNNNNNN depending on magnitude.
    llvm::raw_string_ostream OS(CodePointStr);
    if (CodePoint < 256)
      OS << "\\x" << llvm::format("%02x", CodePoint);
    else if (CodePoint <= 0xFFFF)
      OS << "\\u" << llvm::format("%04x", CodePoint);
    else
      OS << "\\U" << llvm::format("%08x", CodePoint);
    OS.flush();
    Specifier = CodePointStr;
  }

  EmitFormatDiagnostic(
      S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
      /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

/// Diagnose a format string that mixes positional ('%1$d') and
/// non-positional ('%d') specifiers.
void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
    S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
    Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}

/// Verify that \p argIndex names an actual data argument; diagnose and
/// return false if the format string demands more arguments than supplied.
bool
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {

  if (argIndex >= NumDataArgs) {
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
           << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));

    // The format string consumes more arguments than were supplied, so there
    // is no "unused data argument" left to report; mark all as covered to
    // suppress that follow-on diagnostic.
    UncoveredArg.setAllCovered();
    return false;
  }
  return true;
}

/// Convenience wrapper around the static overload that supplies this
/// handler's call context (inFunctionCall, format argument).
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}

/// If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced. Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call. Used for getting locations when two
/// diagnostics are emitted.
9631/// 9632/// \param PDiag the callee should already have provided any strings for the 9633/// diagnostic message. This function only adds locations and fixits 9634/// to diagnostics. 9635/// 9636/// \param Loc primary location for diagnostic. If two diagnostics are 9637/// required, one will be at Loc and a new SourceLocation will be created for 9638/// the other one. 9639/// 9640/// \param IsStringLocation if true, Loc points to the format string should be 9641/// used for the note. Otherwise, Loc points to the argument list and will 9642/// be used with PDiag. 9643/// 9644/// \param StringRange some or all of the string to highlight. This is 9645/// templated so it can accept either a CharSourceRange or a SourceRange. 9646/// 9647/// \param FixIt optional fix it hint for the format string. 9648template <typename Range> 9649void CheckFormatHandler::EmitFormatDiagnostic( 9650 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, 9651 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation, 9652 Range StringRange, ArrayRef<FixItHint> FixIt) { 9653 if (InFunctionCall) { 9654 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); 9655 D << StringRange; 9656 D << FixIt; 9657 } else { 9658 S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) 9659 << ArgumentExpr->getSourceRange(); 9660 9661 const Sema::SemaDiagnosticBuilder &Note = 9662 S.Diag(IsStringLocation ? 
Loc : StringRange.getBegin(), 9663 diag::note_format_string_defined); 9664 9665 Note << StringRange; 9666 Note << FixIt; 9667 } 9668} 9669 9670//===--- CHECK: Printf format string checking ------------------------------===// 9671 9672namespace { 9673 9674class CheckPrintfHandler : public CheckFormatHandler { 9675public: 9676 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr, 9677 const Expr *origFormatExpr, 9678 const Sema::FormatStringType type, unsigned firstDataArg, 9679 unsigned numDataArgs, bool isObjC, const char *beg, 9680 Sema::FormatArgumentPassingKind APK, 9681 ArrayRef<const Expr *> Args, unsigned formatIdx, 9682 bool inFunctionCall, Sema::VariadicCallType CallType, 9683 llvm::SmallBitVector &CheckedVarArgs, 9684 UncoveredArgHandler &UncoveredArg) 9685 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg, 9686 numDataArgs, beg, APK, Args, formatIdx, 9687 inFunctionCall, CallType, CheckedVarArgs, 9688 UncoveredArg) {} 9689 9690 bool isObjCContext() const { return FSType == Sema::FST_NSString; } 9691 9692 /// Returns true if '%@' specifiers are allowed in the format string. 
9693 bool allowsObjCArg() const { 9694 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog || 9695 FSType == Sema::FST_OSTrace; 9696 } 9697 9698 bool HandleInvalidPrintfConversionSpecifier( 9699 const analyze_printf::PrintfSpecifier &FS, 9700 const char *startSpecifier, 9701 unsigned specifierLen) override; 9702 9703 void handleInvalidMaskType(StringRef MaskType) override; 9704 9705 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, 9706 const char *startSpecifier, unsigned specifierLen, 9707 const TargetInfo &Target) override; 9708 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, 9709 const char *StartSpecifier, 9710 unsigned SpecifierLen, 9711 const Expr *E); 9712 9713 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, 9714 const char *startSpecifier, unsigned specifierLen); 9715 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, 9716 const analyze_printf::OptionalAmount &Amt, 9717 unsigned type, 9718 const char *startSpecifier, unsigned specifierLen); 9719 void HandleFlag(const analyze_printf::PrintfSpecifier &FS, 9720 const analyze_printf::OptionalFlag &flag, 9721 const char *startSpecifier, unsigned specifierLen); 9722 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, 9723 const analyze_printf::OptionalFlag &ignoredFlag, 9724 const analyze_printf::OptionalFlag &flag, 9725 const char *startSpecifier, unsigned specifierLen); 9726 bool checkForCStrMembers(const analyze_printf::ArgType &AT, 9727 const Expr *E); 9728 9729 void HandleEmptyObjCModifierFlag(const char *startFlag, 9730 unsigned flagLen) override; 9731 9732 void HandleInvalidObjCModifierFlag(const char *startFlag, 9733 unsigned flagLen) override; 9734 9735 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, 9736 const char *flagsEnd, 9737 const char *conversionPosition) 9738 override; 9739}; 9740 9741} // namespace 9742 9743bool 
CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
  S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
}

/// Check the data argument (if any) that supplies a '*' field width or
/// precision.
/// \param k selects the diagnostic wording: 0 for field width, 1 for
/// precision.
/// Returns false if checking of the enclosing specifier should stop.
bool CheckPrintfHandler::HandleAmount(
    const analyze_format_string::OptionalAmount &Amt, unsigned k,
    const char *startSpecifier, unsigned specifierLen) {
  if (Amt.hasDataArgument()) {
    if (ArgPassingKind != Sema::FAPK_VAList) {
      unsigned argIndex = Amt.getArgIndex();
      if (argIndex >= NumDataArgs) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
                               << k,
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/ true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }

      // Type check the data argument.  It should be an 'int'.
      // Although not in conformance with C99, we also allow the argument to be
      // an 'unsigned int' as that is a reasonably safe case.  GCC also
      // doesn't emit a warning for that case.
      CoveredArgs.set(argIndex);
      const Expr *Arg = getDataArg(argIndex);
      if (!Arg)
        return false;

      QualType T = Arg->getType();

      const analyze_printf::ArgType &AT = Amt.getArgType(S.Context);
      assert(AT.isValid());

      if (!AT.matchesType(S.Context, T)) {
        EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
                               << k << AT.getRepresentativeTypeName(S.Context)
                               << T << Arg->getSourceRange(),
                             getLocationOfByte(Amt.getStart()),
                             /*IsStringLocation*/true,
                             getSpecifierRange(startSpecifier, specifierLen));
        // Don't do any more checking.  We will just emit
        // spurious errors.
        return false;
      }
    }
  }
  return true;
}

/// Diagnose a field width or precision used with a conversion that cannot
/// take one; \p type selects the wording (0 width, 1 precision).
void CheckPrintfHandler::HandleInvalidAmount(
                                      const analyze_printf::PrintfSpecifier &FS,
                                      const analyze_printf::OptionalAmount &Amt,
                                      unsigned type,
                                      const char *startSpecifier,
                                      unsigned specifierLen) {
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();

  // A constant amount can be deleted outright; a '*' amount has a data
  // argument attached, so offer no fix-it.
  FixItHint fixit =
    Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
      ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
                                 Amt.getConstantLength()))
      : FixItHint();

  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
                         << type << CS.toString(),
                       getLocationOfByte(Amt.getStart()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       fixit);
}

void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
                                    const analyze_printf::OptionalFlag &flag,
                                    const char *startSpecifier,
                                    unsigned specifierLen) {
  // Warn about pointless flag with a fixit removal.
  const analyze_printf::PrintfConversionSpecifier &CS =
    FS.getConversionSpecifier();
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
                         << flag.toString() << CS.toString(),
                       getLocationOfByte(flag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(flag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleIgnoredFlag(
                                const analyze_printf::PrintfSpecifier &FS,
                                const analyze_printf::OptionalFlag &ignoredFlag,
                                const analyze_printf::OptionalFlag &flag,
                                const char *startSpecifier,
                                unsigned specifierLen) {
  // Warn about ignored flag with a fixit removal.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
                         << ignoredFlag.toString() << flag.toString(),
                       getLocationOfByte(ignoredFlag.getPosition()),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startSpecifier, specifierLen),
                       FixItHint::CreateRemoval(
                         getSpecifierRange(ignoredFlag.getPosition(), 1)));
}

void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
                                                     unsigned flagLen) {
  // Warn about an empty flag.
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
                       getLocationOfByte(startFlag),
                       /*IsStringLocation*/true,
                       getSpecifierRange(startFlag, flagLen));
}

void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
                                                       unsigned flagLen) {
  // Warn about an invalid flag.
  auto Range = getSpecifierRange(startFlag, flagLen);
  StringRef flag(startFlag, flagLen);
  EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
                      getLocationOfByte(startFlag),
                      /*IsStringLocation*/true,
                      Range, FixItHint::CreateRemoval(Range));
}

void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
    const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
    // Warn about using '[...]' without a '@' conversion.
    auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1);
    auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
    EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1),
                         getLocationOfByte(conversionPosition),
                         /*IsStringLocation*/true,
                         Range, FixItHint::CreateRemoval(Range));
}

// Determines if the specified type is a C++ class or struct containing
// a member with the specified name and kind (e.g. a CXXMethodDecl named
// "c_str()").
template<typename MemberKind>
static llvm::SmallPtrSet<MemberKind*, 1>
CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  llvm::SmallPtrSet<MemberKind*, 1> Results;

  if (!RT)
    return Results;
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD || !RD->getDefinition())
    return Results;

  LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
                 Sema::LookupMemberName);
  R.suppressDiagnostics();

  // We just need to include all members of the right kind turned up by the
  // filter, at this point.
  if (S.LookupQualifiedName(R, RT->getDecl()))
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
      NamedDecl *decl = (*I)->getUnderlyingDecl();
      if (MemberKind *FK = dyn_cast<MemberKind>(decl))
        Results.insert(FK);
    }
  return Results;
}

/// Check if we could call '.c_str()' on an object.
///
/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
/// allow the call, or if it would be ambiguous).
bool Sema::hasCStrMethod(const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType());
  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI)
    if ((*MI)->getMinRequiredArguments() == 0)
      return true;
  return false;
}

// Check if a (w)string was passed when a (w)char* was needed, and offer a
// better diagnostic if so. AT is assumed to be valid.
// Returns true when a c_str() conversion method is found.
bool CheckPrintfHandler::checkForCStrMembers(
    const analyze_printf::ArgType &AT, const Expr *E) {
  using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;

  MethodSet Results =
      CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType());

  for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
       MI != ME; ++MI) {
    const CXXMethodDecl *Method = *MI;
    // Only suggest c_str() when it takes no arguments and returns a type the
    // specifier actually accepts.
    if (Method->getMinRequiredArguments() == 0 &&
        AT.matchesType(S.Context, Method->getReturnType())) {
      // FIXME: Suggest parens if the expression needs them.
      SourceLocation EndLoc = S.getLocForEndOfToken(E->getEndLoc());
      S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
          << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
      return true;
    }
  }

  return false;
}

/// Core per-specifier printf check; dispatched by the format-string parser
/// for every conversion specifier. Returns false to stop walking the string.
/// (NOTE(review): the remainder of this function continues below.)
bool CheckPrintfHandler::HandlePrintfSpecifier(
    const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
    unsigned specifierLen, const TargetInfo &Target) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();

  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      // Remember whether the first argument-consuming specifier was
      // positional; later specifiers must agree.
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // First check if the field width, precision, and conversion specifier
  // have matching data arguments.
  if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
                    startSpecifier, specifierLen)) {
    return false;
  }

  if (!CS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense.  Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // FreeBSD kernel extensions.
  // FreeBSD's kernel printf supports %b (bit-field decoding) and %D (memory
  // dump); both consume two arguments, the second being a 'char *'.
  if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
      CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
    // We need at least two arguments.
    if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1))
      return false;

    // Claim the second argument.
    CoveredArgs.set(argIndex + 1);

    const Expr *Ex = getDataArg(argIndex);
    if (CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
      // Type check the first argument (pointer for %D)
      const analyze_printf::ArgType &AT = ArgType::CPointerTy;
      if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
                << false << Ex->getSourceRange(),
            Ex->getBeginLoc(), /*IsStringLocation*/false,
            getSpecifierRange(startSpecifier, specifierLen));
    } else {
      // Check the length modifier for %b
      if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                     S.getLangOpts()))
        HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                    diag::warn_format_nonsensical_length);
      else if (!FS.hasStandardLengthModifier())
        HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
      else if (!FS.hasStandardLengthConversionCombination())
        HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                    diag::warn_format_non_standard_conversion_spec);

      // Type check the first argument of %b
      if (!checkFormatExpr(FS, startSpecifier, specifierLen, Ex))
        return false;
    }

    // Type check the second argument (char * for both %b and %D)
    Ex = getDataArg(argIndex + 1);
    const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
    if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
              << false << Ex->getSourceRange(),
          Ex->getBeginLoc(), /*IsStringLocation*/ false,
          getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // Check for using an Objective-C specific conversion specifier
  // in a non-ObjC literal.
  if (!allowsObjCArg() && CS.isObjCArg()) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %P can only be used with os_log.
  if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // %n is not allowed with os_log.
  if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));

    return true;
  }

  // %n is not allowed anywhere
  if (CS.getKind() == ConversionSpecifier::nArg) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_narg),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
    return true;
  }

  // Only scalars are allowed for os_trace.
  if (FSType == Sema::FST_OSTrace &&
      (CS.getKind() == ConversionSpecifier::PArg ||
       CS.getKind() == ConversionSpecifier::sArg ||
       CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
    return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
                                                  specifierLen);
  }

  // Check for use of public/private annotation outside of os_log().
  // The {public}/{private} annotations only have meaning for os_log.
  if (FSType != Sema::FST_OSLog) {
    if (FS.isPublic().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "public",
                           getLocationOfByte(FS.isPublic().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
    if (FS.isPrivate().isSet()) {
      EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
                               << "private",
                           getLocationOfByte(FS.isPrivate().getPosition()),
                           /*IsStringLocation*/ false,
                           getSpecifierRange(startSpecifier, specifierLen));
    }
  }

  // NOTE(review): this branch appears unreachable — any nArg specifier
  // already returned from the unconditional "%n is not allowed anywhere"
  // check earlier in this function. Confirm the intended check ordering.
  const llvm::Triple &Triple = Target.getTriple();
  if (CS.getKind() == ConversionSpecifier::nArg &&
      (Triple.isAndroid() || Triple.isOSFuchsia())) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check for invalid use of field width
  if (!FS.hasValidFieldWidth()) {
    HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
                        startSpecifier, specifierLen);
  }

  // Check for invalid use of precision
  if (!FS.hasValidPrecision()) {
    HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
                        startSpecifier, specifierLen);
  }

  // Precision is mandatory for %P specifier.
  if (CS.getKind() == ConversionSpecifier::PArg &&
      FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
                         getLocationOfByte(startSpecifier),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  // Check each flag does not conflict with any other component.
  if (!FS.hasValidThousandsGroupingPrefix())
    HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
  if (!FS.hasValidLeadingZeros())
    HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
  if (!FS.hasValidPlusPrefix())
    HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidSpacePrefix())
    HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
  if (!FS.hasValidAlternativeForm())
    HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
  if (!FS.hasValidLeftJustified())
    HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);

  // Check that flags are not ignored by another flag
  if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
    HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
                      startSpecifier, specifierLen);
  if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
    HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
                      startSpecifier, specifierLen);

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  // With a va_list there are no expression arguments to type-check.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  const Expr *Arg = getDataArg(argIndex);
  if (!Arg)
    return true;

  return checkFormatExpr(FS, startSpecifier, specifierLen, Arg);
}

/// Return true if inserting a C-style cast in front of \p E would require
/// wrapping E in parentheses to keep the intended grouping.
static bool requiresParensToAddCast(const Expr *E) {
  // FIXME: We should have a general way to reason about operator
  // precedence and whether parens are actually needed here.
  // Take care of a few common cases where they aren't.
  const Expr *Inside = E->IgnoreImpCasts();
  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside))
    Inside = POE->getSyntacticForm()->IgnoreImpCasts();

  switch (Inside->getStmtClass()) {
  // These expression forms bind tighter than a prefix cast, so no parens
  // are needed around them.
  case Stmt::ArraySubscriptExprClass:
  case Stmt::CallExprClass:
  case Stmt::CharacterLiteralClass:
  case Stmt::CXXBoolLiteralExprClass:
  case Stmt::DeclRefExprClass:
  case Stmt::FloatingLiteralClass:
  case Stmt::IntegerLiteralClass:
  case Stmt::MemberExprClass:
  case Stmt::ObjCArrayLiteralClass:
  case Stmt::ObjCBoolLiteralExprClass:
  case Stmt::ObjCBoxedExprClass:
  case Stmt::ObjCDictionaryLiteralClass:
  case Stmt::ObjCEncodeExprClass:
  case Stmt::ObjCIvarRefExprClass:
  case Stmt::ObjCMessageExprClass:
  case Stmt::ObjCPropertyRefExprClass:
  case Stmt::ObjCStringLiteralClass:
  case Stmt::ObjCSubscriptRefExprClass:
  case Stmt::ParenExprClass:
  case Stmt::StringLiteralClass:
  case Stmt::UnaryOperatorClass:
    return false;
  default:
    return true;
  }
}

/// If \p IntendedTy is (through typedef sugar) one of a known set of
/// platform-independence typedefs (CFIndex, NSInteger, ...), return the
/// primitive type it should be cast to for printing plus the typedef name;
/// otherwise return a null QualType.
static std::pair<QualType, StringRef>
shouldNotPrintDirectly(const ASTContext &Context,
                       QualType IntendedTy,
                       const Expr *E) {
  // Use a 'while' to peel off layers of typedefs.
  QualType TyTy = IntendedTy;
  while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
    StringRef Name = UserTy->getDecl()->getName();
    // Map the known Darwin platform-independence typedefs to a concrete
    // primitive type that is safe to print.
    QualType CastTy = llvm::StringSwitch<QualType>(Name)
                          .Case("CFIndex", Context.getNSIntegerType())
                          .Case("NSInteger", Context.getNSIntegerType())
                          .Case("NSUInteger", Context.getNSUIntegerType())
                          .Case("SInt32", Context.IntTy)
                          .Case("UInt32", Context.UnsignedIntTy)
                          .Default(QualType());

    if (!CastTy.isNull())
      return std::make_pair(CastTy, Name);

    // Peel one layer of sugar and try again.
    TyTy = UserTy->desugar();
  }

  // Strip parens if necessary.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return shouldNotPrintDirectly(Context,
                                  PE->getSubExpr()->getType(),
                                  PE->getSubExpr());

  // If this is a conditional expression, then its result type is constructed
  // via usual arithmetic conversions and thus there might be no necessary
  // typedef sugar there. Recurse to operands to check for NSInteger &
  // Co. usage condition.
  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
    QualType TrueTy, FalseTy;
    StringRef TrueName, FalseName;

    std::tie(TrueTy, TrueName) =
        shouldNotPrintDirectly(Context,
                               CO->getTrueExpr()->getType(),
                               CO->getTrueExpr());
    std::tie(FalseTy, FalseName) =
        shouldNotPrintDirectly(Context,
                               CO->getFalseExpr()->getType(),
                               CO->getFalseExpr());

    // Report a match only when both arms agree, or when exactly one arm
    // produced a match.
    if (TrueTy == FalseTy)
      return std::make_pair(TrueTy, TrueName);
    else if (TrueTy.isNull())
      return std::make_pair(FalseTy, FalseName);
    else if (FalseTy.isNull())
      return std::make_pair(TrueTy, TrueName);
  }

  return std::make_pair(QualType(), StringRef());
}

/// Return true if \p ICE is an implicit argument promotion of an arithmetic
/// type.
/// Bit-field 'promotions' from a higher ranked type to a lower ranked
/// type do not count.
static bool
isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
  QualType From = ICE->getSubExpr()->getType();
  QualType To = ICE->getType();
  // It's an integer promotion if the destination type is the promoted
  // source type.
  if (ICE->getCastKind() == CK_IntegralCast &&
      S.Context.isPromotableIntegerType(From) &&
      S.Context.getPromotedIntegerType(From) == To)
    return true;
  // Look through vector types, since we do default argument promotion for
  // those in OpenCL.
  if (const auto *VecTy = From->getAs<ExtVectorType>())
    From = VecTy->getElementType();
  if (const auto *VecTy = To->getAs<ExtVectorType>())
    To = VecTy->getElementType();
  // It's a floating promotion if the source type is a lower rank.
  return ICE->getCastKind() == CK_FloatingCast &&
         S.Context.getFloatingTypeOrder(From, To) < 0;
}

/// Type-check one data argument \p E against the printf specifier \p FS,
/// emitting mismatch diagnostics (with fix-its where possible). Returns
/// false only to abort format-string processing; a diagnosed mismatch
/// still returns true.
bool
CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
                                    const char *StartSpecifier,
                                    unsigned SpecifierLen,
                                    const Expr *E) {
  using namespace analyze_format_string;
  using namespace analyze_printf;

  // Now type check the data expression that matches the
  // format specifier.
  const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
  if (!AT.isValid())
    return true;

  // Look through typeof(expr) sugar to the underlying expression type.
  QualType ExprTy = E->getType();
  while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) {
    ExprTy = TET->getUnderlyingExpr()->getType();
  }

  // When using the format attribute in C++, you can receive a function or an
  // array that will necessarily decay to a pointer when passed to the final
  // format consumer. Apply decay before type comparison.
  if (ExprTy->canDecayToPointerType())
    ExprTy = S.Context.getDecayedType(ExprTy);

  // Diagnose attempts to print a boolean value as a character. Unlike other
  // -Wformat diagnostics, this is fine from a type perspective, but it still
  // doesn't make sense.
  if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
      E->isKnownToHaveBooleanValue()) {
    const CharSourceRange &CSR =
        getSpecifierRange(StartSpecifier, SpecifierLen);
    SmallString<4> FSString;
    llvm::raw_svector_ostream os(FSString);
    FS.toString(os);
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
                             << FSString,
                         E->getExprLoc(), false, CSR);
    return true;
  }

  ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
  ArgType::MatchKind Match = AT.matchesType(S.Context, ExprTy);
  if (Match == ArgType::Match)
    return true;

  // NoMatchPromotionTypeConfusion should be only returned in ImplicitCastExpr
  assert(Match != ArgType::NoMatchPromotionTypeConfusion);

  // Look through argument promotions for our error message's reported type.
  // This includes the integral and floating promotions, but excludes array
  // and function pointer decay (seeing that an argument intended to be a
  // string has type 'char [6]' is probably more confusing than 'char *') and
  // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (isArithmeticArgumentPromotion(S, ICE)) {
      E = ICE->getSubExpr();
      ExprTy = E->getType();

      // Check if we didn't match because of an implicit cast from a 'char'
      // or 'short' to an 'int'. This is done because printf is a varargs
      // function.
      if (ICE->getType() == S.Context.IntTy ||
          ICE->getType() == S.Context.UnsignedIntTy) {
        // All further checking is done on the subexpression
        ImplicitMatch = AT.matchesType(S.Context, ExprTy);
        if (ImplicitMatch == ArgType::Match)
          return true;
      }
    }
  } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
    // Special case for 'a', which has type 'int' in C.
    // Note, however, that we do /not/ want to treat multibyte constants like
    // 'MooV' as characters! This form is deprecated but still exists. In
    // addition, don't treat expressions as of type 'char' if one byte length
    // modifier is provided.
    if (ExprTy == S.Context.IntTy &&
        FS.getLengthModifier().getKind() != LengthModifier::AsChar)
      if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) {
        ExprTy = S.Context.CharTy;
        // To improve check results, we consider a character literal in C
        // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
        // more likely a type confusion situation, so we will suggest to
        // use '%hhd' instead by discarding the MatchPromotion.
        if (Match == ArgType::MatchPromotion)
          Match = ArgType::NoMatch;
      }
  }
  if (Match == ArgType::MatchPromotion) {
    // WG14 N2562 only clarified promotions in *printf
    // For NSLog in ObjC, just preserve -Wformat behavior
    if (!S.getLangOpts().ObjC &&
        ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
        ImplicitMatch != ArgType::NoMatchTypeConfusion)
      return true;
    Match = ArgType::NoMatch;
  }
  // Prefer the more specific diagnostic computed on the promoted-from type.
  if (ImplicitMatch == ArgType::NoMatchPedantic ||
      ImplicitMatch == ArgType::NoMatchTypeConfusion)
    Match = ImplicitMatch;
  assert(Match != ArgType::MatchPromotion);
  // Look through enums to their underlying type.
  bool IsEnum = false;
  if (auto EnumTy = ExprTy->getAs<EnumType>()) {
    ExprTy = EnumTy->getDecl()->getIntegerType();
    IsEnum = true;
  }

  // %C in an Objective-C context prints a unichar, not a wchar_t.
  // If the argument is an integer of some kind, believe the %C and suggest
  // a cast instead of changing the conversion specifier.
  QualType IntendedTy = ExprTy;
  if (isObjCContext() &&
      FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
    if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
        !ExprTy->isCharType()) {
      // 'unichar' is defined as a typedef of unsigned short, but we should
      // prefer using the typedef if it is visible.
      IntendedTy = S.Context.UnsignedShortTy;

      // While we are here, check if the value is an IntegerLiteral that happens
      // to be within the valid range.
      if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) {
        const llvm::APInt &V = IL->getValue();
        if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy))
          return true;
      }

      LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getBeginLoc(),
                          Sema::LookupOrdinaryName);
      if (S.LookupName(Result, S.getCurScope())) {
        NamedDecl *ND = Result.getFoundDecl();
        if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND))
          if (TD->getUnderlyingType() == IntendedTy)
            IntendedTy = S.Context.getTypedefType(TD);
      }
    }
  }

  // Special-case some of Darwin's platform-independence types by suggesting
  // casts to primitive types that are known to be large enough.
  bool ShouldNotPrintDirectly = false; StringRef CastTyName;
  if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
    QualType CastTy;
    std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
    if (!CastTy.isNull()) {
      // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
      // (long in ASTContext). Only complain to pedants.
      if ((CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
          (AT.isSizeT() || AT.isPtrdiffT()) &&
          AT.matchesType(S.Context, CastTy))
        Match = ArgType::NoMatchPedantic;
      IntendedTy = CastTy;
      ShouldNotPrintDirectly = true;
    }
  }

  // We may be able to offer a FixItHint if it is a supported type.
  PrintfSpecifier fixedFS = FS;
  bool Success =
      fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());

  if (Success) {
    // Get the fix string from the fixed format specifier
    SmallString<16> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen);

    if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) {
      // Select the diagnostic severity variant that matches the kind of
      // mismatch ArgType reported.
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      // In this case, the specifier is wrong and should be changed to match
      // the argument.
      EmitFormatDiagnostic(S.PDiag(Diag)
                               << AT.getRepresentativeTypeName(S.Context)
                               << IntendedTy << IsEnum << E->getSourceRange(),
                           E->getBeginLoc(),
                           /*IsStringLocation*/ false, SpecRange,
                           FixItHint::CreateReplacement(SpecRange, os.str()));
    } else {
      // The canonical type for formatting this value is different from the
      // actual type of the expression. (This occurs, for example, with Darwin's
      // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
      // should be printed as 'long' for 64-bit compatibility.)
      // Rather than emitting a normal format/argument mismatch, we want to
      // add a cast to the recommended type (and correct the format string
      // if necessary).
      SmallString<16> CastBuf;
      llvm::raw_svector_ostream CastFix(CastBuf);
      CastFix << "(";
      IntendedTy.print(CastFix, S.Context.getPrintingPolicy());
      CastFix << ")";

      SmallVector<FixItHint,4> Hints;
      if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly)
        Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str()));

      if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) {
        // If there's already a cast present, just replace it.
        SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
        Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str()));

      } else if (!requiresParensToAddCast(E)) {
        // If the expression has high enough precedence,
        // just write the C-style cast.
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));
      } else {
        // Otherwise, add parens around the expression as well as the cast.
        CastFix << "(";
        Hints.push_back(
            FixItHint::CreateInsertion(E->getBeginLoc(), CastFix.str()));

        // Insert the closing paren just past the end of the argument.
        SourceLocation After = S.getLocForEndOfToken(E->getEndLoc());
        Hints.push_back(FixItHint::CreateInsertion(After, ")"));
      }

      if (ShouldNotPrintDirectly) {
        // The expression has a type that should not be printed directly.
        // We extract the name from the typedef because we don't want to show
        // the underlying type in the diagnostic.
        StringRef Name;
        if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
          Name = TypedefTy->getDecl()->getName();
        else
          Name = CastTyName;
        unsigned Diag = Match == ArgType::NoMatchPedantic
                            ? diag::warn_format_argument_needs_cast_pedantic
                            : diag::warn_format_argument_needs_cast;
        EmitFormatDiagnostic(S.PDiag(Diag) << Name << IntendedTy << IsEnum
                                           << E->getSourceRange(),
                             E->getBeginLoc(), /*IsStringLocation=*/false,
                             SpecRange, Hints);
      } else {
        // In this case, the expression could be printed using a different
        // specifier, but we've decided that the specifier is probably correct
        // and we should cast instead. Just use the normal warning message.
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
                << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
      }
    }
  } else {
    // No fix-it could be produced for this specifier/type combination.
    const CharSourceRange &CSR = getSpecifierRange(StartSpecifier,
                                                   SpecifierLen);
    // Since the warning for passing non-POD types to variadic functions
    // was deferred until now, we emit a warning for non-POD
    // arguments here.
    bool EmitTypeMismatch = false;
    // Classify how the argument behaves when passed through varargs.
    switch (S.isValidVarArgType(ExprTy)) {
    case Sema::VAK_Valid:
    case Sema::VAK_ValidInCXX11: {
      unsigned Diag;
      switch (Match) {
      case ArgType::Match:
      case ArgType::MatchPromotion:
      case ArgType::NoMatchPromotionTypeConfusion:
        llvm_unreachable("expected non-matching");
      case ArgType::NoMatchPedantic:
        Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
        break;
      case ArgType::NoMatchTypeConfusion:
        Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
        break;
      case ArgType::NoMatch:
        Diag = diag::warn_format_conversion_argument_type_mismatch;
        break;
      }

      EmitFormatDiagnostic(
          S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy
                        << IsEnum << CSR << E->getSourceRange(),
          E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      break;
    }
    case Sema::VAK_Undefined:
    case Sema::VAK_MSVCUndefined:
      if (CallType == Sema::VariadicDoesNotApply) {
        EmitTypeMismatch = true;
      } else {
        EmitFormatDiagnostic(
            S.PDiag(diag::warn_non_pod_vararg_with_format_string)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
        // If the argument looks like a string object, suggest .c_str().
        checkForCStrMembers(AT, E);
      }
      break;

    case Sema::VAK_Invalid:
      if (CallType == Sema::VariadicDoesNotApply)
        EmitTypeMismatch = true;
      else if (ExprTy->isObjCObjectType())
        EmitFormatDiagnostic(
            S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
                << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
                << AT.getRepresentativeTypeName(S.Context) << CSR
                << E->getSourceRange(),
            E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
      else
        // FIXME: If this is an initializer list, suggest removing the braces
        // or inserting a cast to the target type.
        S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
            << isa<InitListExpr>(E) << ExprTy << CallType
            << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
      break;
    }

    if (EmitTypeMismatch) {
      // The function is not variadic, so we do not generate warnings about
      // being allowed to pass that object as a variadic argument. Instead,
      // since there are inherently no printf specifiers for types which cannot
      // be passed as variadic arguments, emit a plain old specifier mismatch
      // argument.
      EmitFormatDiagnostic(
          S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
              << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
              << E->getSourceRange(),
          E->getBeginLoc(), false, CSR);
    }

    // Record that this variadic slot has been diagnosed so later passes do
    // not warn about it again.
    assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
           "format string specifier index out of range");
    CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
  }

  return true;
}

//===--- CHECK: Scanf format string checking ------------------------------===//

namespace {

/// Handler for scanf-style format strings; parallels CheckPrintfHandler.
class CheckScanfHandler : public CheckFormatHandler {
public:
  CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
                    const Expr *origFormatExpr, Sema::FormatStringType type,
                    unsigned firstDataArg, unsigned numDataArgs,
                    const char *beg, Sema::FormatArgumentPassingKind APK,
                    ArrayRef<const Expr *> Args, unsigned formatIdx,
                    bool inFunctionCall, Sema::VariadicCallType CallType,
                    llvm::SmallBitVector &CheckedVarArgs,
                    UncoveredArgHandler &UncoveredArg)
      : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
                           numDataArgs, beg, APK, Args, formatIdx,
                           inFunctionCall, CallType, CheckedVarArgs,
                           UncoveredArg) {}

  bool HandleScanfSpecifier(const
                                analyze_scanf::ScanfSpecifier &FS,
                            const char *startSpecifier,
                            unsigned specifierLen) override;

  bool HandleInvalidScanfConversionSpecifier(
      const analyze_scanf::ScanfSpecifier &FS,
      const char *startSpecifier,
      unsigned specifierLen) override;

  void HandleIncompleteScanList(const char *start, const char *end) override;
};

} // namespace

// Diagnose a scanlist ('%[...') that is missing its closing ']'.
void CheckScanfHandler::HandleIncompleteScanList(const char *start,
                                                 const char *end) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
                       getLocationOfByte(end), /*IsStringLocation*/true,
                       getSpecifierRange(start, end - start));
}

// Forward an invalid scanf conversion specifier to the shared handler.
bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  const analyze_scanf::ScanfConversionSpecifier &CS =
      FS.getConversionSpecifier();

  return HandleInvalidConversionSpecifier(FS.getArgIndex(),
                                          getLocationOfByte(CS.getStart()),
                                          startSpecifier, specifierLen,
                                          CS.getStart(), CS.getLength());
}

/// Validate one scanf conversion specifier: positional-argument consistency,
/// field width, length modifier, and the type of the consumed argument.
bool CheckScanfHandler::HandleScanfSpecifier(
    const analyze_scanf::ScanfSpecifier &FS,
    const char *startSpecifier,
    unsigned specifierLen) {
  using namespace analyze_scanf;
  using namespace analyze_format_string;

  const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();

  // Handle case where '%' and '*' don't consume an argument. These shouldn't
  // be used to decide if we are using positional arguments consistently.
  if (FS.consumesDataArgument()) {
    if (atFirstArg) {
      // First argument-consuming specifier fixes the positional/non-positional
      // convention for the rest of the string.
      atFirstArg = false;
      usesPositionalArgs = FS.usesPositionalArg();
    }
    else if (usesPositionalArgs != FS.usesPositionalArg()) {
      HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
                                        startSpecifier, specifierLen);
      return false;
    }
  }

  // Check if the field width is non-zero.
  const OptionalAmount &Amt = FS.getFieldWidth();
  if (Amt.getHowSpecified() == OptionalAmount::Constant) {
    if (Amt.getConstantAmount() == 0) {
      // A zero field width is meaningless for scanf; offer to remove it.
      const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
                                                   Amt.getConstantLength());
      EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
                           getLocationOfByte(Amt.getStart()),
                           /*IsStringLocation*/true, R,
                           FixItHint::CreateRemoval(R));
    }
  }

  if (!FS.consumesDataArgument()) {
    // FIXME: Technically specifying a precision or field width here
    // makes no sense. Worth issuing a warning at some point.
    return true;
  }

  // Consume the argument.
  unsigned argIndex = FS.getArgIndex();
  if (argIndex < NumDataArgs) {
    // The check to see if the argIndex is valid will come later.
    // We set the bit here because we may exit early from this
    // function if we encounter some other error.
    CoveredArgs.set(argIndex);
  }

  // Check the length modifier is valid with the given conversion specifier.
  if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(),
                                 S.getLangOpts()))
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_nonsensical_length);
  else if (!FS.hasStandardLengthModifier())
    HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
  else if (!FS.hasStandardLengthConversionCombination())
    HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
                                diag::warn_format_non_standard_conversion_spec);

  if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
    HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);

  // The remaining checks depend on the data arguments.
  if (ArgPassingKind == Sema::FAPK_VAList)
    return true;

  if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
    return false;

  // Check that the argument type matches the format specifier.
  const Expr *Ex = getDataArg(argIndex);
  if (!Ex)
    return true;

  const analyze_format_string::ArgType &AT = FS.getArgType(S.Context);

  if (!AT.isValid()) {
    return true;
  }

  analyze_format_string::ArgType::MatchKind Match =
      AT.matchesType(S.Context, Ex->getType());
  bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
  if (Match == analyze_format_string::ArgType::Match)
    return true;

  // Try to compute a corrected specifier to offer as a fix-it.
  ScanfSpecifier fixedFS = FS;
  bool Success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(),
                                 S.getLangOpts(), S.Context);

  unsigned Diag =
      Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
               : diag::warn_format_conversion_argument_type_mismatch;

  if (Success) {
    // Get the fix string from the fixed format specifier.
    SmallString<128> buf;
    llvm::raw_svector_ostream os(buf);
    fixedFS.toString(os);

    // Mismatch with a fix-it that rewrites the specifier in place.
    EmitFormatDiagnostic(
        S.PDiag(Diag) << AT.getRepresentativeTypeName(S.Context)
                      << Ex->getType() << false << Ex->getSourceRange(),
        Ex->getBeginLoc(),
        /*IsStringLocation*/ false,
        getSpecifierRange(startSpecifier, specifierLen),
        FixItHint::CreateReplacement(
            getSpecifierRange(startSpecifier, specifierLen), os.str()));
  } else {
    // Same mismatch, but no automatic correction could be computed.
    EmitFormatDiagnostic(S.PDiag(Diag)
                             << AT.getRepresentativeTypeName(S.Context)
                             << Ex->getType() << false << Ex->getSourceRange(),
                         Ex->getBeginLoc(),
                         /*IsStringLocation*/ false,
                         getSpecifierRange(startSpecifier, specifierLen));
  }

  return true;
}

/// Drive checking of one format-string literal: reject wide literals, handle
/// truncated/unterminated literals, then dispatch to the printf- or
/// scanf-family handler for the requested FormatStringType.
static void CheckFormatString(
    Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
    ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
    unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
    bool inFunctionCall, Sema::VariadicCallType CallType,
    llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
    bool IgnoreStringsWithoutSpecifiers) {
  // CHECK: is the format string a wide literal?
  if (!FExpr->isAscii() && !FExpr->isUTF8()) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
        /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
    return;
  }

  // Str - The format string. NOTE: this is NOT null-terminated!
  StringRef StrRef = FExpr->getString();
  const char *Str = StrRef.data();
  // Account for cases where the string literal is truncated in a declaration.
  const ConstantArrayType *T =
      S.Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Clamp the checked length to the declared array size (minus the
  // terminator) and to the literal's own length, whichever is smaller.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  const unsigned numDataArgs = Args.size() - firstDataArg;

  if (IgnoreStringsWithoutSpecifiers &&
      !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
          Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo()))
    return;

  // Emit a warning if the string literal is truncated and does not contain an
  // embedded null character.
  if (TypeSize <= StrRef.size() && !StrRef.substr(0, TypeSize).contains('\0')) {
    CheckFormatHandler::EmitFormatDiagnostic(
        S, inFunctionCall, Args[format_idx],
        S.PDiag(diag::warn_printf_format_string_not_null_terminated),
        FExpr->getBeginLoc(),
        /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
    return;
  }

  // CHECK: empty format string?
10888 if (StrLen == 0 && numDataArgs > 0) { 10889 CheckFormatHandler::EmitFormatDiagnostic( 10890 S, inFunctionCall, Args[format_idx], 10891 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(), 10892 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange()); 10893 return; 10894 } 10895 10896 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString || 10897 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf || 10898 Type == Sema::FST_OSLog || Type == Sema::FST_OSTrace || 10899 Type == Sema::FST_Syslog) { 10900 CheckPrintfHandler H( 10901 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs, 10902 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK, 10903 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs, 10904 UncoveredArg); 10905 10906 if (!analyze_format_string::ParsePrintfString( 10907 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo(), 10908 Type == Sema::FST_Kprintf || Type == Sema::FST_FreeBSDKPrintf)) 10909 H.DoneProcessing(); 10910 } else if (Type == Sema::FST_Scanf) { 10911 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg, 10912 numDataArgs, Str, APK, Args, format_idx, inFunctionCall, 10913 CallType, CheckedVarArgs, UncoveredArg); 10914 10915 if (!analyze_format_string::ParseScanfString( 10916 H, Str, Str + StrLen, S.getLangOpts(), S.Context.getTargetInfo())) 10917 H.DoneProcessing(); 10918 } // TODO: handle other formats 10919} 10920 10921bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { 10922 // Str - The format string. NOTE: this is NOT null-terminated! 10923 StringRef StrRef = FExpr->getString(); 10924 const char *Str = StrRef.data(); 10925 // Account for cases where the string literal is truncated in a declaration. 
  const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType());
  assert(T && "String literal not of constant array type!");
  size_t TypeSize = T->getSize().getZExtValue();
  // Scan at most TypeSize-1 bytes (drop the terminator slot) and never more
  // than the literal actually holds; max() guards against a zero-size array.
  size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size());
  return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen,
                                                         getLangOpts(),
                                                         Context.getTargetInfo());
}

//===--- CHECK: Warn on use of wrong absolute value function. -------------===//

// Returns the related absolute value function that is larger, or 0 if one
// does not exist.  Functions are chained by family (abs -> labs -> llabs,
// fabsf -> fabs -> fabsl, cabsf -> cabs -> cabsl), with the __builtin_ forms
// and the library forms kept in separate chains.
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
  switch (AbsFunction) {
  default:
    return 0;

  case Builtin::BI__builtin_abs:
    return Builtin::BI__builtin_labs;
  case Builtin::BI__builtin_labs:
    return Builtin::BI__builtin_llabs;
  case Builtin::BI__builtin_llabs:
    return 0;

  case Builtin::BI__builtin_fabsf:
    return Builtin::BI__builtin_fabs;
  case Builtin::BI__builtin_fabs:
    return Builtin::BI__builtin_fabsl;
  case Builtin::BI__builtin_fabsl:
    return 0;

  case Builtin::BI__builtin_cabsf:
    return Builtin::BI__builtin_cabs;
  case Builtin::BI__builtin_cabs:
    return Builtin::BI__builtin_cabsl;
  case Builtin::BI__builtin_cabsl:
    return 0;

  case Builtin::BIabs:
    return Builtin::BIlabs;
  case Builtin::BIlabs:
    return Builtin::BIllabs;
  case Builtin::BIllabs:
    return 0;

  case Builtin::BIfabsf:
    return Builtin::BIfabs;
  case Builtin::BIfabs:
    return Builtin::BIfabsl;
  case Builtin::BIfabsl:
    return 0;

  case Builtin::BIcabsf:
    return Builtin::BIcabs;
  case Builtin::BIcabs:
    return Builtin::BIcabsl;
  case Builtin::BIcabsl:
    return 0;
  }
}

// Returns the argument type of the absolute value function, or a null
// QualType if the builtin's prototype cannot be obtained or does not take
// exactly one parameter.
static QualType getAbsoluteValueArgumentType(ASTContext &Context,
                                             unsigned AbsType) {
  if (AbsType == 0)
    return QualType();

  ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
  QualType BuiltinType = Context.GetBuiltinType(AbsType, Error);
  if (Error != ASTContext::GE_None)
    return QualType();

  const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
  if (!FT)
    return QualType();

  if (FT->getNumParams() != 1)
    return QualType();

  return FT->getParamType(0);
}

// Returns the best absolute value function, or zero, based on type and
// current absolute value function.
static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
                                   unsigned AbsFunctionKind) {
  unsigned BestKind = 0;
  uint64_t ArgSize = Context.getTypeSize(ArgType);
  // Walk the same-family chain of progressively larger functions.  Keep the
  // first one whose parameter is big enough, but prefer a later one whose
  // parameter type matches the argument type exactly.
  for (unsigned Kind = AbsFunctionKind; Kind != 0;
       Kind = getLargerAbsoluteValueFunction(Kind)) {
    QualType ParamType = getAbsoluteValueArgumentType(Context, Kind);
    if (Context.getTypeSize(ParamType) >= ArgSize) {
      if (BestKind == 0)
        BestKind = Kind;
      else if (Context.hasSameType(ParamType, ArgType)) {
        BestKind = Kind;
        break;
      }
    }
  }
  return BestKind;
}

// Value categories an absolute value function can operate on.
enum AbsoluteValueKind {
  AVK_Integer,
  AVK_Floating,
  AVK_Complex
};

// Classify a type into the AbsoluteValueKind it belongs to.  Callers must
// only pass integral/enum, real floating, or complex types.
static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
  if (T->isIntegralOrEnumerationType())
    return AVK_Integer;
  if (T->isRealFloatingType())
    return AVK_Floating;
  if (T->isAnyComplexType())
    return AVK_Complex;

  llvm_unreachable("Type not integer, floating, or complex");
}

// Changes the absolute value function to a different type. Preserves whether
// the function is a builtin.
11049static unsigned changeAbsFunction(unsigned AbsKind, 11050 AbsoluteValueKind ValueKind) { 11051 switch (ValueKind) { 11052 case AVK_Integer: 11053 switch (AbsKind) { 11054 default: 11055 return 0; 11056 case Builtin::BI__builtin_fabsf: 11057 case Builtin::BI__builtin_fabs: 11058 case Builtin::BI__builtin_fabsl: 11059 case Builtin::BI__builtin_cabsf: 11060 case Builtin::BI__builtin_cabs: 11061 case Builtin::BI__builtin_cabsl: 11062 return Builtin::BI__builtin_abs; 11063 case Builtin::BIfabsf: 11064 case Builtin::BIfabs: 11065 case Builtin::BIfabsl: 11066 case Builtin::BIcabsf: 11067 case Builtin::BIcabs: 11068 case Builtin::BIcabsl: 11069 return Builtin::BIabs; 11070 } 11071 case AVK_Floating: 11072 switch (AbsKind) { 11073 default: 11074 return 0; 11075 case Builtin::BI__builtin_abs: 11076 case Builtin::BI__builtin_labs: 11077 case Builtin::BI__builtin_llabs: 11078 case Builtin::BI__builtin_cabsf: 11079 case Builtin::BI__builtin_cabs: 11080 case Builtin::BI__builtin_cabsl: 11081 return Builtin::BI__builtin_fabsf; 11082 case Builtin::BIabs: 11083 case Builtin::BIlabs: 11084 case Builtin::BIllabs: 11085 case Builtin::BIcabsf: 11086 case Builtin::BIcabs: 11087 case Builtin::BIcabsl: 11088 return Builtin::BIfabsf; 11089 } 11090 case AVK_Complex: 11091 switch (AbsKind) { 11092 default: 11093 return 0; 11094 case Builtin::BI__builtin_abs: 11095 case Builtin::BI__builtin_labs: 11096 case Builtin::BI__builtin_llabs: 11097 case Builtin::BI__builtin_fabsf: 11098 case Builtin::BI__builtin_fabs: 11099 case Builtin::BI__builtin_fabsl: 11100 return Builtin::BI__builtin_cabsf; 11101 case Builtin::BIabs: 11102 case Builtin::BIlabs: 11103 case Builtin::BIllabs: 11104 case Builtin::BIfabsf: 11105 case Builtin::BIfabs: 11106 case Builtin::BIfabsl: 11107 return Builtin::BIcabsf; 11108 } 11109 } 11110 llvm_unreachable("Unable to convert function"); 11111} 11112 11113static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { 11114 const IdentifierInfo *FnInfo = 
      FDecl->getIdentifier();
  if (!FnInfo)
    return 0;

  // Return the builtin ID only for the recognized absolute value functions;
  // everything else maps to 0 ("not an abs function").
  switch (FDecl->getBuiltinID()) {
  default:
    return 0;
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs:
  case Builtin::BI__builtin_cabs:
  case Builtin::BI__builtin_cabsf:
  case Builtin::BI__builtin_cabsl:
  case Builtin::BIabs:
  case Builtin::BIlabs:
  case Builtin::BIllabs:
  case Builtin::BIfabs:
  case Builtin::BIfabsf:
  case Builtin::BIfabsl:
  case Builtin::BIcabs:
  case Builtin::BIcabsf:
  case Builtin::BIcabsl:
    return FDecl->getBuiltinID();
  }
  llvm_unreachable("Unknown Builtin type");
}

// If the replacement is valid, emit a note with replacement function.
// Additionally, suggest including the proper header if not already included.
static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
                            unsigned AbsKind, QualType ArgType) {
  bool EmitHeaderHint = true;
  const char *HeaderName = nullptr;
  StringRef FunctionName;
  // In C++ (for non-complex arguments) suggest std::abs, whose overload set
  // covers the type; otherwise suggest the specific builtin/library function.
  if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
    FunctionName = "std::abs";
    if (ArgType->isIntegralOrEnumerationType()) {
      HeaderName = "cstdlib";
    } else if (ArgType->isRealFloatingType()) {
      HeaderName = "cmath";
    } else {
      llvm_unreachable("Invalid Type");
    }

    // Lookup all std::abs
    if (NamespaceDecl *Std = S.getStdNamespace()) {
      LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupQualifiedName(R, Std);

      for (const auto *I : R) {
        const FunctionDecl *FDecl = nullptr;
        // Look through using-declarations to the underlying function.
        if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) {
          FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl());
        } else {
          FDecl = dyn_cast<FunctionDecl>(I);
        }
        if (!FDecl)
          continue;

        // Found std::abs(), check that they are the right ones.
        if (FDecl->getNumParams() != 1)
          continue;

        // Check that the parameter type can handle the argument.
        QualType ParamType = FDecl->getParamDecl(0)->getType();
        if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) &&
            S.Context.getTypeSize(ArgType) <=
                S.Context.getTypeSize(ParamType)) {
          // Found a function, don't need the header hint.
          EmitHeaderHint = false;
          break;
        }
      }
    }
  } else {
    FunctionName = S.Context.BuiltinInfo.getName(AbsKind);
    HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind);

    if (HeaderName) {
      DeclarationName DN(&S.Context.Idents.get(FunctionName));
      LookupResult R(S, DN, Loc, Sema::LookupAnyName);
      R.suppressDiagnostics();
      S.LookupName(R, S.getCurScope());

      // If the name resolves to something other than the intended builtin,
      // a replacement fix-it would change meaning -- bail out silently.
      if (R.isSingleResult()) {
        FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl());
        if (FD && FD->getBuiltinID() == AbsKind) {
          EmitHeaderHint = false;
        } else {
          return;
        }
      } else if (!R.empty()) {
        return;
      }
    }
  }

  S.Diag(Loc, diag::note_replace_abs_function)
      << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);

  if (!HeaderName)
    return;

  if (!EmitHeaderHint)
    return;

  S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
                                                    << FunctionName;
}

// Returns true if FDecl is a function named exactly Str declared in
// namespace std (including inline namespaces within it).
template <std::size_t StrLen>
static bool IsStdFunction(const FunctionDecl *FDecl,
                          const char (&Str)[StrLen]) {
  if (!FDecl)
    return false;
  if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
    return false;
  if (!FDecl->isInStdNamespace())
    return false;

  return true;
}

// Warn when using the wrong abs() function.
// Covers: abs() of an unsigned value (always a no-op), abs() of a pointer
// or array (almost certainly a typo), and an abs variant whose parameter is
// the wrong kind or too small for the argument.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
                                      const FunctionDecl *FDecl) {
  if (Call->getNumArgs() != 1)
    return;

  unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
  bool IsStdAbs = IsStdFunction(FDecl, "abs");
  if (AbsKind == 0 && !IsStdAbs)
    return;

  // ArgType: the type actually passed (with casts stripped).
  // ParamType: the type the argument was converted to for the call.
  QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  QualType ParamType = Call->getArg(0)->getType();

  // Unsigned types cannot be negative. Suggest removing the absolute value
  // function call.
  if (ArgType->isUnsignedIntegerType()) {
    StringRef FunctionName =
        IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(AbsKind);
    Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
    Diag(Call->getExprLoc(), diag::note_remove_abs)
        << FunctionName
        << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
    return;
  }

  // Taking the absolute value of a pointer is very suspicious, they probably
  // wanted to index into an array, dereference a pointer, call a function, etc.
  if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
    unsigned DiagType = 0;
    if (ArgType->isFunctionType())
      DiagType = 1;
    else if (ArgType->isArrayType())
      DiagType = 2;

    Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
    return;
  }

  // std::abs has overloads which prevent most of the absolute value problems
  // from occurring.
  if (IsStdAbs)
    return;

  AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType);
  AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType);

  // The argument and parameter are the same kind. Check if they are the right
  // size.
  if (ArgValueKind == ParamValueKind) {
    if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType))
      return;

    // Argument is wider than the parameter: suggest the larger same-kind
    // function when one exists.
    unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind);
    Diag(Call->getExprLoc(), diag::warn_abs_too_small)
        << FDecl << ArgType << ParamType;

    if (NewAbsKind == 0)
      return;

    emitReplacement(*this, Call->getExprLoc(),
                    Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
    return;
  }

  // ArgValueKind != ParamValueKind
  // The wrong type of absolute value function was used. Attempt to find the
  // proper one.
  unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind);
  NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind);
  if (NewAbsKind == 0)
    return;

  Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
      << FDecl << ParamValueKind << ArgValueKind;

  emitReplacement(*this, Call->getExprLoc(),
                  Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}

//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
// Warn on std::max<unsigned>(x, 0) (or the mirrored form): the zero argument
// can never win the comparison, so the call is a likely sign-confusion bug.
void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
                                const FunctionDecl *FDecl) {
  if (!Call || !FDecl) return;

  // Ignore template specializations and macros.
  if (inTemplateInstantiation()) return;
  if (Call->getExprLoc().isMacroID()) return;

  // Only care about the one template argument, two function parameter std::max
  if (Call->getNumArgs() != 2) return;
  if (!IsStdFunction(FDecl, "max")) return;
  const auto * ArgList = FDecl->getTemplateSpecializationArgs();
  if (!ArgList) return;
  if (ArgList->size() != 1) return;

  // Check that template type argument is unsigned integer.
  const auto& TA = ArgList->get(0);
  if (TA.getKind() != TemplateArgument::Type) return;
  QualType ArgType = TA.getAsType();
  if (!ArgType->isUnsignedIntegerType()) return;

  // See if either argument is a literal zero.
  auto IsLiteralZeroArg = [](const Expr* E) -> bool {
    const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
    if (!MTE) return false;
    const auto *Num = dyn_cast<IntegerLiteral>(MTE->getSubExpr());
    if (!Num) return false;
    if (Num->getValue() != 0) return false;
    return true;
  };

  const Expr *FirstArg = Call->getArg(0);
  const Expr *SecondArg = Call->getArg(1);
  const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
  const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);

  // Only warn when exactly one argument is zero.
  if (IsFirstArgZero == IsSecondArgZero) return;

  SourceRange FirstRange = FirstArg->getSourceRange();
  SourceRange SecondRange = SecondArg->getSourceRange();

  SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;

  Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
      << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;

  // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
  SourceRange RemovalRange;
  if (IsFirstArgZero) {
    // Remove "0, " -- from the zero up to just before the other argument.
    RemovalRange = SourceRange(FirstRange.getBegin(),
                               SecondRange.getBegin().getLocWithOffset(-1));
  } else {
    // Remove ", 0" -- from just after the other argument through the zero.
    RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
                               SecondRange.getEnd());
  }

  Diag(Call->getExprLoc(), diag::note_remove_max_call)
      << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
      << FixItHint::CreateRemoval(RemovalRange);
}

//===--- CHECK: Standard memory functions ---------------------------------===//

/// Takes the expression passed to the size_t parameter of functions
/// such as memcmp, strncat, etc and warns if it's a comparison.
///
/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
                                           IdentifierInfo *FnName,
                                           SourceLocation FnLoc,
                                           SourceLocation RParenLoc) {
  const BinaryOperator *Size = dyn_cast<BinaryOperator>(E);
  if (!Size)
    return false;

  // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
  if (!Size->isComparisonOp() && !Size->isLogicalOp())
    return false;

  // Warn, then suggest two repairs: close the call before the comparison
  // operator, or cast the whole expression to size_t to silence the warning.
  SourceRange SizeRange = Size->getSourceRange();
  S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
      << SizeRange << FnName;
  S.Diag(FnLoc, diag::note_memsize_comparison_paren)
      << FnName
      << FixItHint::CreateInsertion(
             S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
      << FixItHint::CreateRemoval(RParenLoc);
  S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
      << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
      << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
                                    ")");

  return true;
}

/// Determine whether the given type is or contains a dynamic class type
/// (e.g., whether it has a vtable).
static const CXXRecordDecl *getContainedDynamicClass(QualType T,
                                                     bool &IsContained) {
  // Look through array types while ignoring qualifiers.
  const Type *Ty = T->getBaseElementTypeUnsafe();
  IsContained = false;

  const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  RD = RD ? RD->getDefinition() : nullptr;
  if (!RD || RD->isInvalidDecl())
    return nullptr;

  if (RD->isDynamicClass())
    return RD;

  // Check all the fields. If any bases were dynamic, the class is dynamic.
  // It's impossible for a class to transitively contain itself by value, so
  // infinite recursion is impossible.
  for (auto *FD : RD->fields()) {
    bool SubContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(FD->getType(), SubContained)) {
      IsContained = true;
      return ContainedRD;
    }
  }

  return nullptr;
}

/// Returns E as a sizeof unary expression if that is what it is,
/// otherwise returns null.
static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
  if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(E))
    if (Unary->getKind() == UETT_SizeOf)
      return Unary;
  return nullptr;
}

/// If E is a sizeof expression, returns its argument expression,
/// otherwise returns NULL.
static const Expr *getSizeOfExprArg(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    if (!SizeOf->isArgumentType())
      return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
  return nullptr;
}

/// If E is a sizeof expression, returns its argument type.
static QualType getSizeOfArgType(const Expr *E) {
  if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
    return SizeOf->getTypeOfArgument();
  return QualType();
}

namespace {

/// Visits the fields of a record and emits a note_nontrivial_field diagnostic
/// for each field that is non-trivial to default-initialize (ARC strong/weak
/// pointers), recursing through nested structs and arrays.
struct SearchNonTrivialToInitializeField
    : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
  using Super =
      DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;

  SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Route array types to visitArray before the generic kind dispatch.
  void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PDIK, AT, SL);
      return;
    }

    Super::visitWithKind(PDIK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
                  const ArrayType *AT, SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void visitTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: diagnose the non-trivial fields of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToInitializeField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;  // Expression anchoring the runtime-behavior diagnostics.
  Sema &S;
};

/// Visits the fields of a record and emits a note_nontrivial_field diagnostic
/// for each field that is non-trivial to copy (ARC strong/weak pointers),
/// recursing through nested structs and arrays.
struct SearchNonTrivialToCopyField
    : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
  using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;

  SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}

  // Route array types to visitArray before the generic kind dispatch.
  void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
                     SourceLocation SL) {
    if (const auto *AT = asDerived().getContext().getAsArrayType(FT)) {
      asDerived().visitArray(PCK, AT, SL);
      return;
    }

    Super::visitWithKind(PCK, FT, SL);
  }

  void visitARCStrong(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitARCWeak(QualType FT, SourceLocation SL) {
    S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
  }
  void visitStruct(QualType FT, SourceLocation SL) {
    for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
      visit(FD->getType(), FD->getLocation());
  }
  void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
                  SourceLocation SL) {
    visit(getContext().getBaseElementType(AT), SL);
  }
  void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
                SourceLocation SL) {}
  void visitTrivial(QualType FT, SourceLocation SL) {}
  void visitVolatileTrivial(QualType FT, SourceLocation SL) {}

  // Entry point: diagnose the non-trivial fields of record type RT.
  static void diag(QualType RT, const Expr *E, Sema &S) {
    SearchNonTrivialToCopyField(E, S).visitStruct(RT, SourceLocation());
  }

  ASTContext &getContext() { return S.getASTContext(); }

  const Expr *E;  // Expression anchoring the runtime-behavior diagnostics.
  Sema &S;
};

} // namespace

/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
  SizeofExpr = SizeofExpr->IgnoreParenImpCasts();

  // A product or sum counts as a size computation when either operand does,
  // e.g. 'sizeof(T) * n' or 'sizeof(a) + sizeof(b)'.
  if (const auto *BO = dyn_cast<BinaryOperator>(SizeofExpr)) {
    if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
      return false;

    return doesExprLikelyComputeSize(BO->getLHS()) ||
           doesExprLikelyComputeSize(BO->getRHS());
  }

  return getAsSizeOfExpr(SizeofExpr) != nullptr;
}

/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
///
/// \code
///   #define MACRO 0
///   foo(MACRO);
///   foo(0);
/// \endcode
///
/// This should return true for the first call to foo, but not for the second
/// (regardless of whether foo is a macro or function).
static bool isArgumentExpandedFromMacro(SourceManager &SM,
                                        SourceLocation CallLoc,
                                        SourceLocation ArgLoc) {
  if (!CallLoc.isMacroID())
    return SM.getFileID(CallLoc) != SM.getFileID(ArgLoc);

  return SM.getFileID(SM.getImmediateMacroCallerLoc(CallLoc)) !=
         SM.getFileID(SM.getImmediateMacroCallerLoc(ArgLoc));
}

/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
/// last two arguments transposed.
static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
  // Only memset and bzero are checked here.
  if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
    return;

  // The length argument: arg 2 for memset, arg 1 for bzero.
  const Expr *SizeArg =
      Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();

  auto isLiteralZero = [](const Expr *E) {
    return (isa<IntegerLiteral>(E) &&
            cast<IntegerLiteral>(E)->getValue() == 0) ||
           (isa<CharacterLiteral>(E) &&
            cast<CharacterLiteral>(E)->getValue() == 0);
  };

  // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
  // Skip when the zero came in through a macro argument -- that is commonly
  // intentional (configuration macros).
  SourceLocation CallLoc = Call->getRParenLoc();
  SourceManager &SM = S.getSourceManager();
  if (isLiteralZero(SizeArg) &&
      !isArgumentExpandedFromMacro(SM, CallLoc, SizeArg->getExprLoc())) {

    SourceLocation DiagLoc = SizeArg->getExprLoc();

    // Some platforms #define bzero to __builtin_memset. See if this is the
    // case, and if so, emit a better diagnostic.
    if (BId == Builtin::BIbzero ||
        (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
                                    CallLoc, SM, S.getLangOpts()) == "bzero")) {
      S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
      S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
    } else if (!isLiteralZero(Call->getArg(1)->IgnoreImpCasts())) {
      // memset(p, 0, 0) is left alone: the fill byte being zero makes the
      // "transposed arguments" reading indistinguishable from the intent.
      S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
      S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
    }
    return;
  }

  // If the second argument to a memset is a sizeof expression and the third
  // isn't, this is also likely an error. This should catch
  // 'memset(buf, sizeof(buf), 0xff)'.
  if (BId == Builtin::BImemset &&
      doesExprLikelyComputeSize(Call->getArg(1)) &&
      !doesExprLikelyComputeSize(Call->getArg(2))) {
    SourceLocation DiagLoc = Call->getArg(1)->getExprLoc();
    S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
    S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
    return;
  }
}

/// Check for dangerous or invalid arguments to memset().
///
/// This issues warnings on known problematic, dangerous or unspecified
/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
/// function calls.
///
/// \param Call The call expression to diagnose.
/// Diagnose dangerous or suspicious arguments to memory-access library
/// functions (memset, memcpy, memmove, memcmp, bcmp, bzero, strndup, ...).
///
/// Warns about, e.g., sizeof(ptr) used where sizeof(*ptr) was intended,
/// mem-functions applied to dynamic classes, ARC-managed pointees, and
/// non-trivial C structs.
///
/// \param Call the call being checked.
/// \param BId the builtin ID of the callee; must be non-zero.
/// \param FnName the callee's name, used in diagnostics.
void Sema::CheckMemaccessArguments(const CallExpr *Call,
                                   unsigned BId,
                                   IdentifierInfo *FnName) {
  assert(BId != 0);

  // It is possible to have a non-standard definition of memset.  Validate
  // we have enough arguments, and if not, abort further checking.
  unsigned ExpectedNumArgs =
      (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
  if (Call->getNumArgs() < ExpectedNumArgs)
    return;

  // memset/bzero/strndup only have one pointer argument to inspect; the
  // copying/comparison functions have two (source and destination).
  unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
                      BId == Builtin::BIstrndup ? 1 : 2);
  // The length is the second argument for bzero/strndup, the third otherwise.
  unsigned LenArg =
      (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
  const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();

  if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
                                     Call->getBeginLoc(), Call->getRParenLoc()))
    return;

  // Catch cases like 'memset(buf, sizeof(buf), 0)'.
  CheckMemaccessSize(*this, BId, Call);

  // We have special checking when the length is a sizeof expression.
  QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
  const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
  llvm::FoldingSetNodeID SizeOfArgID;

  // Although widely used, 'bzero' is not a standard function. Be more strict
  // with the argument types before allowing diagnostics and only allow the
  // form bzero(ptr, sizeof(...)).
  QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
  if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
    return;

  // Inspect each pointer argument in turn; each check either diagnoses and
  // breaks out, or continues to the next argument.
  for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
    const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
    SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();

    QualType DestTy = Dest->getType();
    QualType PointeeTy;
    if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
      PointeeTy = DestPtrTy->getPointeeType();

      // Never warn about void type pointers. This can be used to suppress
      // false positives.
      if (PointeeTy->isVoidType())
        continue;

      // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
      // actually comparing the expressions for equality. Because computing the
      // expression IDs can be expensive, we only do this if the diagnostic is
      // enabled.
      if (SizeOfArg &&
          !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
                           SizeOfArg->getExprLoc())) {
        // We only compute IDs for expressions if the warning is enabled, and
        // cache the sizeof arg's ID (lazily, on first use).
        if (SizeOfArgID == llvm::FoldingSetNodeID())
          SizeOfArg->Profile(SizeOfArgID, Context, true);
        llvm::FoldingSetNodeID DestID;
        Dest->Profile(DestID, Context, true);
        if (DestID == SizeOfArgID) {
          // TODO: For strncpy() and friends, this could suggest sizeof(dst)
          //       over sizeof(src) as well.
          unsigned ActionIdx = 0; // Default is to suggest dereferencing.
          StringRef ReadableName = FnName->getName();

          if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
            if (UnaryOp->getOpcode() == UO_AddrOf)
              ActionIdx = 1; // If its an address-of operator, just remove it.
          if (!PointeeTy->isIncompleteType() &&
              (Context.getTypeSize(PointeeTy) == Context.getCharWidth()))
            ActionIdx = 2; // If the pointee's size is sizeof(char),
                           // suggest an explicit length.

          // If the function is defined as a builtin macro, do not show macro
          // expansion.
          SourceLocation SL = SizeOfArg->getExprLoc();
          SourceRange DSR = Dest->getSourceRange();
          SourceRange SSR = SizeOfArg->getSourceRange();
          SourceManager &SM = getSourceManager();

          if (SM.isMacroArgExpansion(SL)) {
            ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts);
            SL = SM.getSpellingLoc(SL);
            DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()),
                              SM.getSpellingLoc(DSR.getEnd()));
            SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()),
                              SM.getSpellingLoc(SSR.getEnd()));
          }

          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess)
                                << ReadableName
                                << PointeeTy
                                << DestTy
                                << DSR
                                << SSR);
          DiagRuntimeBehavior(SL, SizeOfArg,
                              PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
                                << ActionIdx
                                << SSR);

          break;
        }
      }

      // Also check for cases where the sizeof argument is the exact same
      // type as the memory argument, and where it points to a user-defined
      // record type.
      if (SizeOfArgTy != QualType()) {
        if (PointeeTy->isRecordType() &&
            Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
          DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
                              PDiag(diag::warn_sizeof_pointer_type_memaccess)
                                << FnName << SizeOfArgTy << ArgIdx
                                << PointeeTy << Dest->getSourceRange()
                                << LenExpr->getSourceRange());
          break;
        }
      }
    } else if (DestTy->isArrayType()) {
      PointeeTy = DestTy;
    }

    // Neither a pointer nor an array argument: nothing more to check here.
    if (PointeeTy == QualType())
      continue;

    // Always complain about dynamic classes.
    bool IsContained;
    if (const CXXRecordDecl *ContainedRD =
            getContainedDynamicClass(PointeeTy, IsContained)) {

      unsigned OperationType = 0;
      const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
      // "overwritten" if we're warning about the destination for any call
      // but memcmp; otherwise a verb appropriate to the call.
      if (ArgIdx != 0 || IsCmp) {
        if (BId == Builtin::BImemcpy)
          OperationType = 1;
        else if(BId == Builtin::BImemmove)
          OperationType = 2;
        else if (IsCmp)
          OperationType = 3;
      }

      DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                          PDiag(diag::warn_dyn_class_memaccess)
                            << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
                            << IsContained << ContainedRD << OperationType
                            << Call->getCallee()->getSourceRange());
    } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
               BId != Builtin::BImemset)
      DiagRuntimeBehavior(
          Dest->getExprLoc(), Dest,
          PDiag(diag::warn_arc_object_memaccess)
              << ArgIdx << FnName << PointeeTy
              << Call->getCallee()->getSourceRange());
    else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
      // Non-trivial C structs must not be zeroed with memset/bzero nor
      // copied with memcpy/memmove; point at the offending field as well.
      if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
          RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 0);
        SearchNonTrivialToInitializeField::diag(PointeeTy, Dest, *this);
      } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
                 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
        DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
                            PDiag(diag::warn_cstruct_memaccess)
                                << ArgIdx << FnName << PointeeTy << 1);
        SearchNonTrivialToCopyField::diag(PointeeTy, Dest, *this);
      } else {
        continue;
      }
    } else
      continue;

    // A diagnostic was emitted above; suggest casting to void* to silence it.
    DiagRuntimeBehavior(
        Dest->getExprLoc(), Dest,
        PDiag(diag::note_bad_memaccess_silence)
            << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
    break;
  }
}

// A little helper routine: ignore addition and subtraction of integer literals.
// This intentionally does not ignore all integer constant expressions because
// we don't want to remove sizeof().
// NOTE: the Ctx parameter is currently unused.
static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
  Ex = Ex->IgnoreParenCasts();

  // Repeatedly peel '<expr> +/- <literal>' (or '<literal> +/- <expr>'),
  // keeping the non-literal operand each time.
  while (true) {
    const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
    if (!BO || !BO->isAdditiveOp())
      break;

    const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
    const Expr *LHS = BO->getLHS()->IgnoreParenCasts();

    if (isa<IntegerLiteral>(RHS))
      Ex = LHS;
    else if (isa<IntegerLiteral>(LHS))
      Ex = RHS;
    else
      break;
  }

  return Ex;
}

/// Returns true if \p Ty is a constant-size array with more than one element,
/// or a variable-length array. Used to decide whether a 'sizeof(dst)' fix-it
/// is worth suggesting.
static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
                                                      ASTContext &Context) {
  // Only handle constant-sized or VLAs, but not flexible members.
  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) {
    // Only issue the FIXIT for arrays of size > 1.
    if (CAT->getSize().getSExtValue() <= 1)
      return false;
  } else if (!Ty->isVariableArrayType()) {
    return false;
  }
  return true;
}

// Warn if the user has made the 'size' argument to strlcpy or strlcat
// be the size of the source, instead of the destination.
11885void Sema::CheckStrlcpycatArguments(const CallExpr *Call, 11886 IdentifierInfo *FnName) { 11887 11888 // Don't crash if the user has the wrong number of arguments 11889 unsigned NumArgs = Call->getNumArgs(); 11890 if ((NumArgs != 3) && (NumArgs != 4)) 11891 return; 11892 11893 const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); 11894 const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); 11895 const Expr *CompareWithSrc = nullptr; 11896 11897 if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, 11898 Call->getBeginLoc(), Call->getRParenLoc())) 11899 return; 11900 11901 // Look for 'strlcpy(dst, x, sizeof(x))' 11902 if (const Expr *Ex = getSizeOfExprArg(SizeArg)) 11903 CompareWithSrc = Ex; 11904 else { 11905 // Look for 'strlcpy(dst, x, strlen(x))' 11906 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { 11907 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && 11908 SizeCall->getNumArgs() == 1) 11909 CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); 11910 } 11911 } 11912 11913 if (!CompareWithSrc) 11914 return; 11915 11916 // Determine if the argument to sizeof/strlen is equal to the source 11917 // argument. 
In principle there's all kinds of things you could do 11918 // here, for instance creating an == expression and evaluating it with 11919 // EvaluateAsBooleanCondition, but this uses a more direct technique: 11920 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); 11921 if (!SrcArgDRE) 11922 return; 11923 11924 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); 11925 if (!CompareWithSrcDRE || 11926 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) 11927 return; 11928 11929 const Expr *OriginalSizeArg = Call->getArg(2); 11930 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size) 11931 << OriginalSizeArg->getSourceRange() << FnName; 11932 11933 // Output a FIXIT hint if the destination is an array (rather than a 11934 // pointer to an array). This could be enhanced to handle some 11935 // pointers if we know the actual size, like if DstArg is 'array+2' 11936 // we could say 'sizeof(array)-2'. 11937 const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); 11938 if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) 11939 return; 11940 11941 SmallString<128> sizeString; 11942 llvm::raw_svector_ostream OS(sizeString); 11943 OS << "sizeof("; 11944 DstArg->printPretty(OS, nullptr, getPrintingPolicy()); 11945 OS << ")"; 11946 11947 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size) 11948 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), 11949 OS.str()); 11950} 11951 11952/// Check if two expressions refer to the same declaration. 
/// Returns true when both expressions are DeclRefExprs naming the same
/// declaration; either argument may be null.
static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
  if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
    if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
      return D1->getDecl() == D2->getDecl();
  return false;
}

/// If \p E is a call to strlen, return its argument (stripped of parens and
/// casts); otherwise return null.
static const Expr *getStrlenExprArg(const Expr *E) {
  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
      return nullptr;
    return CE->getArg(0)->IgnoreParenCasts();
  }
  return nullptr;
}

// Warn on anti-patterns as the 'size' argument to strncat.
// The correct size argument should look like following:
//   strncat(dst, src, sizeof(dst) - strlen(dest) - 1);
void Sema::CheckStrncatArguments(const CallExpr *CE,
                                 IdentifierInfo *FnName) {
  // Don't crash if the user has the wrong number of arguments.
  if (CE->getNumArgs() < 3)
    return;
  const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
  const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
  const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();

  if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getBeginLoc(),
                                     CE->getRParenLoc()))
    return;

  // Identify common expressions, which are wrongly used as the size argument
  // to strncat and may lead to buffer overflows.
  // PatternType 1: size derived from the destination; 2: from the source.
  unsigned PatternType = 0;
  if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
    // - sizeof(dst)
    if (referToTheSameDecl(SizeOfArg, DstArg))
      PatternType = 1;
    // - sizeof(src)
    else if (referToTheSameDecl(SizeOfArg, SrcArg))
      PatternType = 2;
  } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
    if (BE->getOpcode() == BO_Sub) {
      const Expr *L = BE->getLHS()->IgnoreParenCasts();
      const Expr *R = BE->getRHS()->IgnoreParenCasts();
      // - sizeof(dst) - strlen(dst)
      if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
          referToTheSameDecl(DstArg, getStrlenExprArg(R)))
        PatternType = 1;
      // - sizeof(src) - (anything)
      else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
        PatternType = 2;
    }
  }

  if (PatternType == 0)
    return;

  // Generate the diagnostic.
  SourceLocation SL = LenArg->getBeginLoc();
  SourceRange SR = LenArg->getSourceRange();
  SourceManager &SM = getSourceManager();

  // If the function is defined as a builtin macro, do not show macro expansion.
  if (SM.isMacroArgExpansion(SL)) {
    SL = SM.getSpellingLoc(SL);
    SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
                     SM.getSpellingLoc(SR.getEnd()));
  }

  // Check if the destination is an array (rather than a pointer to an array).
  QualType DstTy = DstArg->getType();
  bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy,
                                                                    Context);
  if (!isKnownSizeArray) {
    if (PatternType == 1)
      Diag(SL, diag::warn_strncat_wrong_size) << SR;
    else
      Diag(SL, diag::warn_strncat_src_size) << SR;
    return;
  }

  if (PatternType == 1)
    Diag(SL, diag::warn_strncat_large_size) << SR;
  else
    Diag(SL, diag::warn_strncat_src_size) << SR;

  // Known-size destination array: suggest the canonical
  // 'sizeof(dst) - strlen(dst) - 1' replacement via fix-it.
  SmallString<128> sizeString;
  llvm::raw_svector_ostream OS(sizeString);
  OS << "sizeof(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - ";
  OS << "strlen(";
  DstArg->printPretty(OS, nullptr, getPrintingPolicy());
  OS << ") - 1";

  Diag(SL, diag::note_strncat_wrong_size)
      << FixItHint::CreateReplacement(SR, OS.str());
}

namespace {
/// Emit the 'freeing non-heap object' warning when \p D is a field,
/// function, or variable declaration; otherwise do nothing.
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
                                const UnaryOperator *UnaryExpr, const Decl *D) {
  if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
    S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
        << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
    return;
  }
}

/// Handle 'free(&lvalue)': warn when the address of a named declaration or
/// a member is being freed (references are skipped since they may alias
/// heap storage).
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
                                 const UnaryOperator *UnaryExpr) {
  if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
    const Decl *D = Lvalue->getDecl();
    if (isa<DeclaratorDecl>(D))
      if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
        return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
  }

  if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
    return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
                                      Lvalue->getMemberDecl());
}

/// Handle 'free(+expr)': warn when a unary-plus is applied to a lambda
/// (which converts a captureless lambda to a function pointer).
void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
                            const UnaryOperator *UnaryExpr) {
  const auto *Lambda = dyn_cast<LambdaExpr>(
      UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
  if (!Lambda)
    return;

  S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 2 /*object: lambda expression*/;
}

/// Warn when an array variable (which decays to a pointer) is passed to a
/// deallocation function.
void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
                                  const DeclRefExpr *Lvalue) {
  const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
  if (Var == nullptr)
    return;

  S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << Var;
}

/// Warn when the freed argument is a suspicious cast: a function pointer
/// bit-cast, an integer literal converted to a pointer, or a
/// function-to-pointer decay.
void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
                            const CastExpr *Cast) {
  SmallString<128> SizeString;
  llvm::raw_svector_ostream OS(SizeString);

  clang::CastKind Kind = Cast->getCastKind();
  if (Kind == clang::CK_BitCast &&
      !Cast->getSubExpr()->getType()->isFunctionPointerType())
    return;
  if (Kind == clang::CK_IntegralToPointer &&
      !isa<IntegerLiteral>(
          Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
    return;

  switch (Cast->getCastKind()) {
  case clang::CK_BitCast:
  case clang::CK_IntegralToPointer:
  case clang::CK_FunctionToPointerDecay:
    OS << '\'';
    Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
    OS << '\'';
    break;
  default:
    return;
  }

  S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
      << CalleeName << 0 /*object: */ << OS.str();
}
} // namespace

/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
  const std::string CalleeName =
      cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();

  { // Prefer something that doesn't involve a cast to make things simpler.
    const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
    if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
      switch (UnaryExpr->getOpcode()) {
      case UnaryOperator::Opcode::UO_AddrOf:
        return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
      case UnaryOperator::Opcode::UO_Plus:
        return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
      default:
        break;
      }

    if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
      if (Lvalue->getType()->isArrayType())
        return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);

    if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
      Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
      return;
    }

    if (isa<BlockExpr>(Arg)) {
      Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
          << CalleeName << 1 /*object: block*/;
      return;
    }
  }
  // Maybe the cast was important, check after the other cases.
  if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
    return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}

/// Checks a returned expression: warns on returning null from a function
/// whose return is annotated/typed non-null, on nothrow operator new
/// returning null, and on PPC MMA types used as return types.
void
Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                         SourceLocation ReturnLoc,
                         bool isObjCMethod,
                         const AttrVec *Attrs,
                         const FunctionDecl *FD) {
  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
        << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto
        = FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
    }
  }

  // PPC MMA non-pointer types are not allowed as return type. Checking the type
  // here prevent the user from using a PPC MMA type as trailing return type.
  if (Context.getTargetInfo().getTriple().isPPC64())
    CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}

/// Check for comparisons of floating-point values using == and !=. Issue a
/// warning if the comparison is not likely to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
                                BinaryOperatorKind Opcode) {
  if (!BinaryOperator::isEqualityOp(Opcode))
    return;

  // Match and capture subexpressions such as "(float) X == 0.1".
  FloatingLiteral *FPLiteral;
  CastExpr *FPCast;
  auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
    FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
    FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
    return FPLiteral && FPCast;
  };

  if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
    auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
    auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
    if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
        TargetTy->isFloatingPoint()) {
      // Convert the literal to the operand's (narrower) semantics; if the
      // conversion is lossy the comparison can never be exact.
      bool Lossy;
      llvm::APFloat TargetC = FPLiteral->getValue();
      TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
                      llvm::APFloat::rmNearestTiesToEven, &Lossy);
      if (Lossy) {
        // If the literal cannot be represented in the source type, then a
        // check for == is always false and check for != is always true.
        Diag(Loc, diag::warn_float_compare_literal)
            << (Opcode == BO_EQ) << QualType(SourceTy, 0)
            << LHS->getSourceRange() << RHS->getSourceRange();
        return;
      }
    }
  }

  // Match a more general floating-point equality comparison (-Wfloat-equal).
  Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
  if (auto *DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
    if (auto *DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
      if (DRL->getDecl() == DRR->getDecl())
        return;

  // Special case: check for comparisons against literals that can be exactly
  // represented by APFloat. In such cases, do not emit a warning. This
  // is a heuristic: often comparison against such literals are used to
  // detect if a value in a variable has not changed. This clearly can
  // lead to false negatives.
  if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
    if (FLL->isExact())
      return;
  } else
    if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen))
      if (FLR->isExact())
        return;

  // Check for comparisons with builtin types.
  if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
    if (CL->getBuiltinCallee())
      return;

  if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
    if (CR->getBuiltinCallee())
      return;

  // Emit the diagnostic.
  Diag(Loc, diag::warn_floatingpoint_eq)
      << LHS->getSourceRange() << RHS->getSourceRange();
}

//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//

namespace {

/// Structure recording the 'active' range of an integer-valued
/// expression.
struct IntRange {
  /// The number of bits active in the int. Note that this includes exactly one
  /// sign bit if !NonNegative.
  unsigned Width;

  /// True if the int is known not to have negative values. If so, all leading
  /// bits before Width are known zero, otherwise they are known to be the
  /// same as the MSB within Width.
  bool NonNegative;

  IntRange(unsigned Width, bool NonNegative)
      : Width(Width), NonNegative(NonNegative) {}

  /// Number of bits excluding the sign bit.
  unsigned valueBits() const {
    return NonNegative ? Width : Width - 1;
  }

  /// Returns the range of the bool type.
  static IntRange forBoolType() {
    return IntRange(1, true);
  }

  /// Returns the range of an opaque value of the given integral type.
  static IntRange forValueOfType(ASTContext &C, QualType T) {
    return forValueOfCanonicalType(C,
                          T->getCanonicalTypeInternal().getTypePtr());
  }

  /// Returns the range of an opaque value of a canonical integral type.
  static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    // Look through vector, complex, and atomic wrappers to the element type.
    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();

    if (!C.getLangOpts().CPlusPlus) {
      // For enum types in C code, use the underlying datatype.
      if (const EnumType *ET = dyn_cast<EnumType>(T))
        T = ET->getDecl()->getIntegerType().getDesugaredType(C).getTypePtr();
    } else if (const EnumType *ET = dyn_cast<EnumType>(T)) {
      // For enum types in C++, use the known bit width of the enumerators.
      EnumDecl *Enum = ET->getDecl();
      // In C++11, enums can have a fixed underlying type. Use this type to
      // compute the range.
      if (Enum->isFixed()) {
        return IntRange(C.getIntWidth(QualType(T, 0)),
                        !ET->isSignedIntegerOrEnumerationType());
      }

      unsigned NumPositive = Enum->getNumPositiveBits();
      unsigned NumNegative = Enum->getNumNegativeBits();

      if (NumNegative == 0)
        return IntRange(NumPositive, true/*NonNegative*/);
      else
        return IntRange(std::max(NumPositive + 1, NumNegative),
                        false/*NonNegative*/);
    }

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the "target" range of a canonical integral type, i.e.
  /// the range of values expressible in the type.
  ///
  /// This matches forValueOfCanonicalType except that enums have the
  /// full range of their type, not the range of their enumerators.
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    if (const auto *EIT = dyn_cast<BitIntType>(T))
      return IntRange(EIT->getNumBits(), EIT->isUnsigned());

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + !Unsigned,
                    L.NonNegative && R.NonNegative);
  }

  /// Return the range of a bitwise-AND of the two ranges.
  static IntRange bit_and(IntRange L, IntRange R) {
    // A non-negative operand bounds the result from above.
    unsigned Bits = std::max(L.Width, R.Width);
    bool NonNegative = false;
    if (L.NonNegative) {
      Bits = std::min(Bits, L.Width);
      NonNegative = true;
    }
    if (R.NonNegative) {
      Bits = std::min(Bits, R.Width);
      NonNegative = true;
    }
    return IntRange(Bits, NonNegative);
  }

  /// Return the range of a sum of the two ranges.
  static IntRange sum(IntRange L, IntRange R) {
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + 1 + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a difference of the two ranges.
  static IntRange difference(IntRange L, IntRange R) {
    // We need a 1-bit-wider range if:
    //   1) LHS can be negative: least value can be reduced.
    //   2) RHS can be negative: greatest value can be increased.
    bool CanWiden = !L.NonNegative || !R.NonNegative;
    bool Unsigned = L.NonNegative && R.Width == 0;
    return IntRange(std::max(L.valueBits(), R.valueBits()) + CanWiden +
                    !Unsigned,
                    Unsigned);
  }

  /// Return the range of a product of the two ranges.
  static IntRange product(IntRange L, IntRange R) {
    // If both LHS and RHS can be negative, we can form
    //   -2^L * -2^R = 2^(L + R)
    // which requires L + R + 1 value bits to represent.
    bool CanWiden = !L.NonNegative && !R.NonNegative;
    bool Unsigned = L.NonNegative && R.NonNegative;
    return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
                    Unsigned);
  }

  /// Return the range of a remainder operation between the two ranges.
  static IntRange rem(IntRange L, IntRange R) {
    // The result of a remainder can't be larger than the result of
    // either side. The sign of the result is the sign of the LHS.
    bool Unsigned = L.NonNegative;
    return IntRange(std::min(L.valueBits(), R.valueBits()) + !Unsigned,
                    Unsigned);
  }
};

} // namespace

/// Returns the IntRange of a concrete integer constant, truncating the
/// value to \p MaxWidth when it is wider. Note: may modify \p value.
static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness. Values reaching this point are known non-negative, so the
  // active-bit count fully describes the range.
  return IntRange(value.getActiveBits(), true);
}

/// Returns the IntRange of an evaluated constant, handling integer, vector,
/// and complex-integer APValues; any other kind is assumed to span the full
/// \p MaxWidth.
static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    // The vector's range is the join of all element ranges.
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case.  It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

/// Returns the expression's type with any atomic wrapper stripped to the
/// underlying value type.
static QualType GetExprType(const Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth The width to which the value will be truncated.
/// \param Approximate If \c true, return a likely range for the result: in
///        particular, assume that arithmetic on narrower types doesn't leave
///        those types. If \c false, return a range including all possible
///        result values.
static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
                             bool InConstantContext, bool Approximate) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C, InConstantContext))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
12518 if (const auto *CE = dyn_cast<ImplicitCastExpr>(E)) { 12519 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) 12520 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext, 12521 Approximate); 12522 12523 IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); 12524 12525 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast || 12526 CE->getCastKind() == CK_BooleanToSignedIntegral; 12527 12528 // Assume that non-integer casts can span the full range of the type. 12529 if (!isIntegerCast) 12530 return OutputTypeRange; 12531 12532 IntRange SubRange = GetExprRange(C, CE->getSubExpr(), 12533 std::min(MaxWidth, OutputTypeRange.Width), 12534 InConstantContext, Approximate); 12535 12536 // Bail out if the subexpr's range is as wide as the cast type. 12537 if (SubRange.Width >= OutputTypeRange.Width) 12538 return OutputTypeRange; 12539 12540 // Otherwise, we take the smaller width, and we're non-negative if 12541 // either the output type or the subexpr is. 12542 return IntRange(SubRange.Width, 12543 SubRange.NonNegative || OutputTypeRange.NonNegative); 12544 } 12545 12546 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 12547 // If we can fold the condition, just take that operand. 12548 bool CondResult; 12549 if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) 12550 return GetExprRange(C, 12551 CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), 12552 MaxWidth, InConstantContext, Approximate); 12553 12554 // Otherwise, conservatively merge. 12555 // GetExprRange requires an integer expression, but a throw expression 12556 // results in a void type. 12557 Expr *E = CO->getTrueExpr(); 12558 IntRange L = E->getType()->isVoidType() 12559 ? IntRange{0, true} 12560 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 12561 E = CO->getFalseExpr(); 12562 IntRange R = E->getType()->isVoidType() 12563 ? 
IntRange{0, true} 12564 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); 12565 return IntRange::join(L, R); 12566 } 12567 12568 if (const auto *BO = dyn_cast<BinaryOperator>(E)) { 12569 IntRange (*Combine)(IntRange, IntRange) = IntRange::join; 12570 12571 switch (BO->getOpcode()) { 12572 case BO_Cmp: 12573 llvm_unreachable("builtin <=> should have class type"); 12574 12575 // Boolean-valued operations are single-bit and positive. 12576 case BO_LAnd: 12577 case BO_LOr: 12578 case BO_LT: 12579 case BO_GT: 12580 case BO_LE: 12581 case BO_GE: 12582 case BO_EQ: 12583 case BO_NE: 12584 return IntRange::forBoolType(); 12585 12586 // The type of the assignments is the type of the LHS, so the RHS 12587 // is not necessarily the same type. 12588 case BO_MulAssign: 12589 case BO_DivAssign: 12590 case BO_RemAssign: 12591 case BO_AddAssign: 12592 case BO_SubAssign: 12593 case BO_XorAssign: 12594 case BO_OrAssign: 12595 // TODO: bitfields? 12596 return IntRange::forValueOfType(C, GetExprType(E)); 12597 12598 // Simple assignments just pass through the RHS, which will have 12599 // been coerced to the LHS type. 12600 case BO_Assign: 12601 // TODO: bitfields? 12602 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12603 Approximate); 12604 12605 // Operations with opaque sources are black-listed. 12606 case BO_PtrMemD: 12607 case BO_PtrMemI: 12608 return IntRange::forValueOfType(C, GetExprType(E)); 12609 12610 // Bitwise-and uses the *infinum* of the two source ranges. 12611 case BO_And: 12612 case BO_AndAssign: 12613 Combine = IntRange::bit_and; 12614 break; 12615 12616 // Left shift gets black-listed based on a judgement call. 12617 case BO_Shl: 12618 // ...except that we want to treat '1 << (blah)' as logically 12619 // positive. It's an important idiom. 
12620 if (IntegerLiteral *I 12621 = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { 12622 if (I->getValue() == 1) { 12623 IntRange R = IntRange::forValueOfType(C, GetExprType(E)); 12624 return IntRange(R.Width, /*NonNegative*/ true); 12625 } 12626 } 12627 [[fallthrough]]; 12628 12629 case BO_ShlAssign: 12630 return IntRange::forValueOfType(C, GetExprType(E)); 12631 12632 // Right shift by a constant can narrow its left argument. 12633 case BO_Shr: 12634 case BO_ShrAssign: { 12635 IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth, InConstantContext, 12636 Approximate); 12637 12638 // If the shift amount is a positive constant, drop the width by 12639 // that much. 12640 if (std::optional<llvm::APSInt> shift = 12641 BO->getRHS()->getIntegerConstantExpr(C)) { 12642 if (shift->isNonNegative()) { 12643 unsigned zext = shift->getZExtValue(); 12644 if (zext >= L.Width) 12645 L.Width = (L.NonNegative ? 0 : 1); 12646 else 12647 L.Width -= zext; 12648 } 12649 } 12650 12651 return L; 12652 } 12653 12654 // Comma acts as its right operand. 12655 case BO_Comma: 12656 return GetExprRange(C, BO->getRHS(), MaxWidth, InConstantContext, 12657 Approximate); 12658 12659 case BO_Add: 12660 if (!Approximate) 12661 Combine = IntRange::sum; 12662 break; 12663 12664 case BO_Sub: 12665 if (BO->getLHS()->getType()->isPointerType()) 12666 return IntRange::forValueOfType(C, GetExprType(E)); 12667 if (!Approximate) 12668 Combine = IntRange::difference; 12669 break; 12670 12671 case BO_Mul: 12672 if (!Approximate) 12673 Combine = IntRange::product; 12674 break; 12675 12676 // The width of a division result is mostly determined by the size 12677 // of the LHS. 12678 case BO_Div: { 12679 // Don't 'pre-truncate' the operands. 12680 unsigned opWidth = C.getIntWidth(GetExprType(E)); 12681 IntRange L = GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, 12682 Approximate); 12683 12684 // If the divisor is constant, use that. 
12685 if (std::optional<llvm::APSInt> divisor = 12686 BO->getRHS()->getIntegerConstantExpr(C)) { 12687 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor)) 12688 if (log2 >= L.Width) 12689 L.Width = (L.NonNegative ? 0 : 1); 12690 else 12691 L.Width = std::min(L.Width - log2, MaxWidth); 12692 return L; 12693 } 12694 12695 // Otherwise, just use the LHS's width. 12696 // FIXME: This is wrong if the LHS could be its minimal value and the RHS 12697 // could be -1. 12698 IntRange R = GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, 12699 Approximate); 12700 return IntRange(L.Width, L.NonNegative && R.NonNegative); 12701 } 12702 12703 case BO_Rem: 12704 Combine = IntRange::rem; 12705 break; 12706 12707 // The default behavior is okay for these. 12708 case BO_Xor: 12709 case BO_Or: 12710 break; 12711 } 12712 12713 // Combine the two ranges, but limit the result to the type in which we 12714 // performed the computation. 12715 QualType T = GetExprType(E); 12716 unsigned opWidth = C.getIntWidth(T); 12717 IntRange L = 12718 GetExprRange(C, BO->getLHS(), opWidth, InConstantContext, Approximate); 12719 IntRange R = 12720 GetExprRange(C, BO->getRHS(), opWidth, InConstantContext, Approximate); 12721 IntRange C = Combine(L, R); 12722 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType(); 12723 C.Width = std::min(C.Width, MaxWidth); 12724 return C; 12725 } 12726 12727 if (const auto *UO = dyn_cast<UnaryOperator>(E)) { 12728 switch (UO->getOpcode()) { 12729 // Boolean-valued operations are white-listed. 12730 case UO_LNot: 12731 return IntRange::forBoolType(); 12732 12733 // Operations with opaque sources are black-listed. 
12734 case UO_Deref: 12735 case UO_AddrOf: // should be impossible 12736 return IntRange::forValueOfType(C, GetExprType(E)); 12737 12738 default: 12739 return GetExprRange(C, UO->getSubExpr(), MaxWidth, InConstantContext, 12740 Approximate); 12741 } 12742 } 12743 12744 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) 12745 return GetExprRange(C, OVE->getSourceExpr(), MaxWidth, InConstantContext, 12746 Approximate); 12747 12748 if (const auto *BitField = E->getSourceBitField()) 12749 return IntRange(BitField->getBitWidthValue(C), 12750 BitField->getType()->isUnsignedIntegerOrEnumerationType()); 12751 12752 return IntRange::forValueOfType(C, GetExprType(E)); 12753} 12754 12755static IntRange GetExprRange(ASTContext &C, const Expr *E, 12756 bool InConstantContext, bool Approximate) { 12757 return GetExprRange(C, E, C.getIntWidth(GetExprType(E)), InConstantContext, 12758 Approximate); 12759} 12760 12761/// Checks whether the given value, which currently has the given 12762/// source semantics, has the same value when coerced through the 12763/// target semantics. 12764static bool IsSameFloatAfterCast(const llvm::APFloat &value, 12765 const llvm::fltSemantics &Src, 12766 const llvm::fltSemantics &Tgt) { 12767 llvm::APFloat truncated = value; 12768 12769 bool ignored; 12770 truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); 12771 truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); 12772 12773 return truncated.bitwiseIsEqual(value); 12774} 12775 12776/// Checks whether the given value, which currently has the given 12777/// source semantics, has the same value when coerced through the 12778/// target semantics. 12779/// 12780/// The value might be a vector of floats (or a complex number). 
static bool IsSameFloatAfterCast(const APValue &value,
                                 const llvm::fltSemantics &Src,
                                 const llvm::fltSemantics &Tgt) {
  if (value.isFloat())
    return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);

  // A vector is same-after-cast only if every element is.
  if (value.isVector()) {
    for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
      if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
        return false;
    return true;
  }

  // Complex: both the real and imaginary components must survive the cast.
  assert(value.isComplexFloat());
  return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
          IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
}

// Forward declaration; defined later in this file.
static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
                                       bool IsListInit = false);

/// Returns true if \p E is an enum constant reference or was expanded from a
/// macro (other than the boolean-literal macros YES/NO/true/false). Used to
/// suppress in-range tautological-comparison diagnostics for such constants.
static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
  // Suppress cases where we are comparing against an enum constant.
  if (const DeclRefExpr *DR =
      dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
    if (isa<EnumConstantDecl>(DR->getDecl()))
      return true;

  // Suppress cases where the value is expanded from a macro, unless that macro
  // is how a language represents a boolean literal. This is the case in both C
  // and Objective-C.
  SourceLocation BeginLoc = E->getBeginLoc();
  if (BeginLoc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroName(
        BeginLoc, S.getSourceManager(), S.getLangOpts());
    return MacroName != "YES" && MacroName != "NO" &&
           MacroName != "true" && MacroName != "false";
  }

  return false;
}

/// Returns true if \p E is an integer expression that is unsigned either at
/// its own type or beneath any implicit casts (e.g. an unsigned value
/// promoted to signed int).
static bool isKnownToHaveUnsignedValue(Expr *E) {
  return E->getType()->isIntegerType() &&
         (!E->getType()->isSignedIntegerType() ||
          !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
}

namespace {
/// The promoted range of values of a type. In general this has the
/// following structure:
///
///     |-----------| . . . |-----------|
///     ^           ^       ^           ^
///     Min     HoleMin  HoleMax       Max
///
/// ... where there is only a hole if a signed type is promoted to unsigned
/// (in which case Min and Max are the smallest and largest representable
/// values).
struct PromotedRange {
  // Min, or HoleMax if there is a hole.
  llvm::APSInt PromotedMin;
  // Max, or HoleMin if there is a hole.
  llvm::APSInt PromotedMax;

  PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
    if (R.Width == 0)
      // Empty range: a single zero value.
      PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
    else if (R.Width >= BitWidth && !Unsigned) {
      // Promotion made the type *narrower*. This happens when promoting
      // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
      // Treat all values of 'signed int' as being in range for now.
      PromotedMin = llvm::APSInt::getMinValue(BitWidth, Unsigned);
      PromotedMax = llvm::APSInt::getMaxValue(BitWidth, Unsigned);
    } else {
      PromotedMin = llvm::APSInt::getMinValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMin.setIsUnsigned(Unsigned);

      PromotedMax = llvm::APSInt::getMaxValue(R.Width, R.NonNegative)
                        .extOrTrunc(BitWidth);
      PromotedMax.setIsUnsigned(Unsigned);
    }
  }

  // Determine whether this range is contiguous (has no hole).
  // A hole manifests as PromotedMin (= HoleMax) > PromotedMax (= HoleMin).
  bool isContiguous() const { return PromotedMin <= PromotedMax; }

  // Where a constant value is within the range.
  enum ComparisonResult {
    LT = 0x1,
    LE = 0x2,
    GT = 0x4,
    GE = 0x8,
    EQ = 0x10,
    NE = 0x20,
    InRangeFlag = 0x40,

    Less = LE | LT | NE,
    Min = LE | InRangeFlag,
    InRange = InRangeFlag,
    Max = GE | InRangeFlag,
    Greater = GE | GT | NE,

    OnlyValue = LE | GE | EQ | InRangeFlag,
    InHole = NE
  };

  ComparisonResult compare(const llvm::APSInt &Value) const {
    assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
           Value.isUnsigned() == PromotedMin.isUnsigned());
    if (!isContiguous()) {
      // Discontiguous case: PromotedMin is HoleMax and PromotedMax is
      // HoleMin, so membership tests are inverted relative to the
      // contiguous case.
      assert(Value.isUnsigned() && "discontiguous range for signed compare");
      if (Value.isMinValue()) return Min;
      if (Value.isMaxValue()) return Max;
      if (Value >= PromotedMin) return InRange;
      if (Value <= PromotedMax) return InRange;
      return InHole;
    }

    switch (llvm::APSInt::compareValues(Value, PromotedMin)) {
    case -1: return Less;
    case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
    case 1:
      switch (llvm::APSInt::compareValues(Value, PromotedMax)) {
      case -1: return InRange;
      case 0: return Max;
      case 1: return Greater;
      }
    }

    llvm_unreachable("impossible compare result");
  }

  /// If comparing a value with the given ComparisonResult against a constant
  /// always yields the same answer under operator \p Op, return that answer
  /// as diagnostic text ("true"/"false", or a std::strong_ordering constant
  /// for <=>); otherwise return std::nullopt.
  static std::optional<StringRef>
  constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
    if (Op == BO_Cmp) {
      ComparisonResult LTFlag = LT, GTFlag = GT;
      // The flags describe the variable relative to the constant; flip the
      // directional flags when the constant is on the right.
      if (ConstantOnRHS) std::swap(LTFlag, GTFlag);

      if (R & EQ) return StringRef("'std::strong_ordering::equal'");
      if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
      if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
      return std::nullopt;
    }

    ComparisonResult TrueFlag, FalseFlag;
    if (Op == BO_EQ) {
      TrueFlag = EQ;
      FalseFlag = NE;
    } else if (Op == BO_NE) {
      TrueFlag = NE;
      FalseFlag = EQ;
    } else {
      // Relational operators: the XOR with ConstantOnRHS accounts for the
      // operand order (x < C vs. C < x), and GE/LE swap below handles the
      // inclusive forms.
      if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
        TrueFlag = LT;
        FalseFlag = GE;
      } else {
        TrueFlag = GT;
        FalseFlag = LE;
      }
      if (Op == BO_GE || Op == BO_LE)
        std::swap(TrueFlag, FalseFlag);
    }
    if (R & TrueFlag)
      return StringRef("true");
    if (R & FalseFlag)
      return StringRef("false");
    return std::nullopt;
  }
};
}

/// Returns true if \p E has enumeration type beneath any implicit integral
/// promotions.
static bool HasEnumType(Expr *E) {
  // Strip off implicit integral promotions.
  while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
    if (ICE->getCastKind() != CK_IntegralCast &&
        ICE->getCastKind() != CK_NoOp)
      break;
    E = ICE->getSubExpr();
  }

  return E->getType()->isEnumeralType();
}

/// Classify a constant operand for diagnostic selection: literal 'true',
/// literal 'false', or anything else.
static int classifyConstantValue(Expr *Constant) {
  // The values of this enumeration are used in the diagnostics
  // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
  enum ConstantValueKind {
    Miscellaneous = 0,
    LiteralTrue,
    LiteralFalse
  };
  if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Constant))
    return BL->getValue() ? ConstantValueKind::LiteralTrue
                          : ConstantValueKind::LiteralFalse;
  return ConstantValueKind::Miscellaneous;
}

/// Diagnose comparisons of an integer constant against an expression whose
/// value range makes the result a foregone conclusion (-Wtautological-compare
/// and friends).
///
/// \param E the comparison being analyzed
/// \param Constant the constant operand; \param Other the non-constant one
/// \param Value the evaluated constant
/// \param RhsConstant true if the constant is the RHS of \p E
/// \returns true if a diagnostic was emitted.
static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
                                        Expr *Constant, Expr *Other,
                                        const llvm::APSInt &Value,
                                        bool RhsConstant) {
  if (S.inTemplateInstantiation())
    return false;

  // Keep the operand with its implicit casts for the bit-field / unsigned
  // checks below.
  Expr *OriginalOther = Other;

  Constant = Constant->IgnoreParenImpCasts();
  Other = Other->IgnoreParenImpCasts();

  // Suppress warnings on tautological comparisons between values of the same
  // enumeration type. There are only two ways we could warn on this:
  //   - If the constant is outside the range of representable values of
  //     the enumeration. In such a case, we should warn about the cast
  //     to enumeration type, not about the comparison.
  //   - If the constant is the maximum / minimum in-range value. For an
  //     enumeration type, such comparisons can be meaningful and useful.
  if (Constant->getType()->isEnumeralType() &&
      S.Context.hasSameUnqualifiedType(Constant->getType(), Other->getType()))
    return false;

  IntRange OtherValueRange = GetExprRange(
      S.Context, Other, S.isConstantEvaluated(), /*Approximate*/ false);

  QualType OtherT = Other->getType();
  if (const auto *AT = OtherT->getAs<AtomicType>())
    OtherT = AT->getValueType();
  IntRange OtherTypeRange = IntRange::forValueOfType(S.Context, OtherT);

  // Special case for ObjC BOOL on targets where it's a typedef for a signed
  // char (Namely, macOS). FIXME: IntRange::forValueOfType should do this.
  bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
                              S.NSAPIObj->isObjCBOOLType(OtherT) &&
                              OtherT->isSpecificBuiltinType(BuiltinType::SChar);

  // Whether we're treating Other as being a bool because of the form of
  // expression despite it having another type (typically 'int' in C).
  bool OtherIsBooleanDespiteType =
      !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
  if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
    OtherTypeRange = OtherValueRange = IntRange::forBoolType();

  // Check if all values in the range of possible values of this expression
  // lead to the same comparison outcome.
  PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
                                        Value.isUnsigned());
  auto Cmp = OtherPromotedValueRange.compare(Value);
  auto Result = PromotedRange::constantValue(E->getOpcode(), Cmp, RhsConstant);
  if (!Result)
    return false;

  // Also consider the range determined by the type alone. This allows us to
  // classify the warning under the proper diagnostic group.
  bool TautologicalTypeCompare = false;
  {
    PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
                                         Value.isUnsigned());
    auto TypeCmp = OtherPromotedTypeRange.compare(Value);
    if (auto TypeResult = PromotedRange::constantValue(E->getOpcode(), TypeCmp,
                                                       RhsConstant)) {
      TautologicalTypeCompare = true;
      Cmp = TypeCmp;
      Result = TypeResult;
    }
  }

  // Don't warn if the non-constant operand actually always evaluates to the
  // same value.
  if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
    return false;

  // Suppress the diagnostic for an in-range comparison if the constant comes
  // from a macro or enumerator. We don't want to diagnose
  //
  //   some_long_value <= INT_MAX
  //
  // when sizeof(int) == sizeof(long).
  bool InRange = Cmp & PromotedRange::InRangeFlag;
  if (InRange && IsEnumConstOrFromMacro(S, Constant))
    return false;

  // A comparison of an unsigned bit-field against 0 is really a type problem,
  // even though at the type level the bit-field might promote to 'signed int'.
  if (Other->refersToBitField() && InRange && Value == 0 &&
      Other->getType()->isUnsignedIntegerOrEnumerationType())
    TautologicalTypeCompare = true;

  // If this is a comparison to an enum constant, include that
  // constant in the diagnostic.
  const EnumConstantDecl *ED = nullptr;
  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant))
    ED = dyn_cast<EnumConstantDecl>(DR->getDecl());

  // Should be enough for uint128 (39 decimal digits)
  SmallString<64> PrettySourceValue;
  llvm::raw_svector_ostream OS(PrettySourceValue);
  if (ED) {
    OS << '\'' << *ED << "' (" << Value << ")";
  } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
               Constant->IgnoreParenImpCasts())) {
    OS << (BL->getValue() ? "YES" : "NO");
  } else {
    OS << Value;
  }

  if (!TautologicalTypeCompare) {
    // Tautological only because of the operand's *value* range (e.g. a
    // narrowed bit-field), not its type.
    S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
        << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
        << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
    return true;
  }

  if (IsObjCSignedCharBool) {
    S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                          S.PDiag(diag::warn_tautological_compare_objc_bool)
                              << OS.str() << *Result);
    return true;
  }

  // FIXME: We use a somewhat different formatting for the in-range cases and
  // cases involving boolean values for historical reasons. We should pick a
  // consistent way of presenting these diagnostics.
  if (!InRange || Other->isKnownToHaveBooleanValue()) {

    S.DiagRuntimeBehavior(
        E->getOperatorLoc(), E,
        S.PDiag(!InRange ? diag::warn_out_of_range_compare
                         : diag::warn_tautological_bool_compare)
            << OS.str() << classifyConstantValue(Constant) << OtherT
            << OtherIsBooleanDespiteType << *Result
            << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
  } else {
    bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
    unsigned Diag =
        (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
            ? (HasEnumType(OriginalOther)
                   ? diag::warn_unsigned_enum_always_true_comparison
                   : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
                              : diag::warn_unsigned_always_true_comparison)
            : diag::warn_tautological_constant_compare;

    S.Diag(E->getOperatorLoc(), Diag)
        << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
        << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
  }

  return true;
}

/// Analyze the operands of the given comparison. Implements the
/// fallback case from AnalyzeComparison.
static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
}

/// Implements -Wsign-compare.
///
/// \param E the binary operator to check for warnings
static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
  // The type the comparison is being performed in.
  QualType T = E->getLHS()->getType();

  // Only analyze comparison operators where both sides have been converted to
  // the same type.
  if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()))
    return AnalyzeImpConvsInComparison(S, E);

  // Don't analyze value-dependent comparisons directly.
  if (E->isValueDependent())
    return AnalyzeImpConvsInComparison(S, E);

  Expr *LHS = E->getLHS();
  Expr *RHS = E->getRHS();

  if (T->isIntegralType(S.Context)) {
    std::optional<llvm::APSInt> RHSValue =
        RHS->getIntegerConstantExpr(S.Context);
    std::optional<llvm::APSInt> LHSValue =
        LHS->getIntegerConstantExpr(S.Context);

    // We don't care about expressions whose result is a constant.
    if (RHSValue && LHSValue)
      return AnalyzeImpConvsInComparison(S, E);

    // We only care about expressions where just one side is literal
    if ((bool)RHSValue ^ (bool)LHSValue) {
      // Is the constant on the RHS or LHS?
      const bool RhsConstant = (bool)RHSValue;
      Expr *Const = RhsConstant ? RHS : LHS;
      Expr *Other = RhsConstant ? LHS : RHS;
      const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;

      // Check whether an integer constant comparison results in a value
      // of 'true' or 'false'.
      if (CheckTautologicalComparison(S, E, Const, Other, Value, RhsConstant))
        return AnalyzeImpConvsInComparison(S, E);
    }
  }

  if (!T->hasUnsignedIntegerRepresentation()) {
    // We don't do anything special if this isn't an unsigned integral
    // comparison: we're only interested in integral comparisons, and
    // signed comparisons only happen in cases we don't care to warn about.
    return AnalyzeImpConvsInComparison(S, E);
  }

  LHS = LHS->IgnoreParenImpCasts();
  RHS = RHS->IgnoreParenImpCasts();

  if (!S.getLangOpts().CPlusPlus) {
    // Avoid warning about comparison of integers with different signs when
    // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
    // the type of `E`.
    if (const auto *TET = dyn_cast<TypeOfExprType>(LHS->getType()))
      LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
    if (const auto *TET = dyn_cast<TypeOfExprType>(RHS->getType()))
      RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
  }

  // Check to see if one of the (unmodified) operands is of different
  // signedness.
  Expr *signedOperand, *unsignedOperand;
  if (LHS->getType()->hasSignedIntegerRepresentation()) {
    assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
           "unsigned comparison between two signed integer expressions?");
    signedOperand = LHS;
    unsignedOperand = RHS;
  } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
    signedOperand = RHS;
    unsignedOperand = LHS;
  } else {
    // Both operands are unsigned beneath their implicit conversions;
    // nothing to warn about here.
    return AnalyzeImpConvsInComparison(S, E);
  }

  // Otherwise, calculate the effective range of the signed operand.
  IntRange signedRange = GetExprRange(
      S.Context, signedOperand, S.isConstantEvaluated(), /*Approximate*/ true);

  // Go ahead and analyze implicit conversions in the operands. Note
  // that we skip the implicit conversions on both sides.
  AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
  AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());

  // If the signed range is non-negative, -Wsign-compare won't fire.
  if (signedRange.NonNegative)
    return;

  // For (in)equality comparisons, if the unsigned operand is a
  // constant which cannot collide with an overflowed signed operand,
  // then reinterpreting the signed operand as unsigned will not
  // change the result of the comparison.
  if (E->isEqualityOp()) {
    unsigned comparisonWidth = S.Context.getIntWidth(T);
    IntRange unsignedRange =
        GetExprRange(S.Context, unsignedOperand, S.isConstantEvaluated(),
                     /*Approximate*/ true);

    // We should never be unable to prove that the unsigned operand is
    // non-negative.
    assert(unsignedRange.NonNegative && "unsigned range includes negative?");

    if (unsignedRange.Width < comparisonWidth)
      return;
  }

  S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
                        S.PDiag(diag::warn_mixed_sign_comparison)
                            << LHS->getType() << RHS->getType()
                            << LHS->getSourceRange() << RHS->getSourceRange());
}

/// Analyzes an attempt to assign the given value to a bitfield.
///
/// Returns true if there was something fishy about the attempt.
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
                                      SourceLocation InitLoc) {
  assert(Bitfield->isBitField());
  if (Bitfield->isInvalidDecl())
    return false;

  // White-list bool bitfields.
  QualType BitfieldType = Bitfield->getType();
  if (BitfieldType->isBooleanType())
    return false;

  if (BitfieldType->isEnumeralType()) {
    EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
    // If the underlying enum type was not explicitly specified as an unsigned
    // type and the enum contains only positive values, MSVC++ will cause an
    // inconsistency by storing this as a signed type.
    if (S.getLangOpts().CPlusPlus11 &&
        !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
        BitfieldEnumDecl->getNumPositiveBits() > 0 &&
        BitfieldEnumDecl->getNumNegativeBits() == 0) {
      S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
          << BitfieldEnumDecl;
    }
  }

  // Ignore value- or type-dependent expressions.
  if (Bitfield->getBitWidth()->isValueDependent() ||
      Bitfield->getBitWidth()->isTypeDependent() ||
      Init->isValueDependent() ||
      Init->isTypeDependent())
    return false;

  Expr *OriginalInit = Init->IgnoreParenImpCasts();
  unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);

  Expr::EvalResult Result;
  if (!OriginalInit->EvaluateAsInt(Result, S.Context,
                                   Expr::SE_AllowSideEffects)) {
    // The RHS is not constant. If the RHS has an enum type, make sure the
    // bitfield is wide enough to hold all the values of the enum without
    // truncation.
    if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
      EnumDecl *ED = EnumTy->getDecl();
      bool SignedBitfield = BitfieldType->isSignedIntegerType();

      // Enum types are implicitly signed on Windows, so check if there are any
      // negative enumerators to see if the enum was intended to be signed or
      // not.
      bool SignedEnum = ED->getNumNegativeBits() > 0;

      // Check for surprising sign changes when assigning enum values to a
      // bitfield of different signedness. If the bitfield is signed and we
      // have exactly the right number of bits to store this unsigned enum,
      // suggest changing the enum to an unsigned type. This typically happens
      // on Windows where unfixed enums always use an underlying type of 'int'.
      unsigned DiagID = 0;
      if (SignedEnum && !SignedBitfield) {
        DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
      } else if (SignedBitfield && !SignedEnum &&
                 ED->getNumPositiveBits() == FieldWidth) {
        DiagID = diag::warn_signed_bitfield_enum_conversion;
      }

      if (DiagID) {
        S.Diag(InitLoc, DiagID) << Bitfield << ED;
        // Point the note at the bitfield's declared type, if we have source
        // info for it.
        TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
        SourceRange TypeRange =
            TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
        S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
            << SignedEnum << TypeRange;
      }

      // Compute the required bitwidth. If the enum has negative values, we need
      // one more bit than the normal number of positive bits to represent the
      // sign bit.
      unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
                                                  ED->getNumNegativeBits())
                                       : ED->getNumPositiveBits();

      // Check the bitwidth.
      if (BitsNeeded > FieldWidth) {
        Expr *WidthExpr = Bitfield->getBitWidth();
        S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
            << Bitfield << ED;
        S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
            << BitsNeeded << ED << WidthExpr->getSourceRange();
      }
    }

    return false;
  }

  llvm::APSInt Value = Result.Val.getInt();

  unsigned OriginalWidth = Value.getBitWidth();

  // In C, the macro 'true' from stdbool.h will evaluate to '1'; To reduce
  // false positives where the user is demonstrating they intend to use the
  // bit-field as a Boolean, check to see if the value is 1 and we're assigning
  // to a one-bit bit-field to see if the value came from a macro named 'true'.
  bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
  if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
    SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
    if (S.SourceMgr.isInSystemMacro(MaybeMacroLoc) &&
        S.findMacroSpelling(MaybeMacroLoc, "true"))
      return false;
  }

  // For negated/complemented literals (e.g. ~0, -1), measure the width of
  // the value itself rather than the literal's full bit width.
  if (!Value.isSigned() || Value.isNegative())
    if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
      if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
        OriginalWidth = Value.getMinSignedBits();

  if (OriginalWidth <= FieldWidth)
    return false;

  // Compute the value which the bitfield will contain.
  llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
  TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());

  // Check whether the stored value is equal to the original value.
  TruncatedValue = TruncatedValue.extend(OriginalWidth);
  if (llvm::APSInt::isSameValue(Value, TruncatedValue))
    return false;

  std::string PrettyValue = toString(Value, 10);
  std::string PrettyTrunc = toString(TruncatedValue, 10);

  S.Diag(InitLoc, OneAssignedToOneBitBitfield
                      ? diag::warn_impcast_single_bit_bitield_precision_constant
                      : diag::warn_impcast_bitfield_precision_constant)
      << PrettyValue << PrettyTrunc << OriginalInit->getType()
      << Init->getSourceRange();

  return true;
}

/// Analyze the given simple or compound assignment for warning-worthy
/// operations.
static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
  // Just recurse on the LHS.
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());

  // We want to recurse on the RHS as normal unless we're assigning to
  // a bitfield.
13403 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { 13404 if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), 13405 E->getOperatorLoc())) { 13406 // Recurse, ignoring any implicit conversions on the RHS. 13407 return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), 13408 E->getOperatorLoc()); 13409 } 13410 } 13411 13412 AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); 13413 13414 // Diagnose implicitly sequentially-consistent atomic assignment. 13415 if (E->getLHS()->getType()->isAtomicType()) 13416 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst); 13417} 13418 13419/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 13420static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, 13421 SourceLocation CContext, unsigned diag, 13422 bool pruneControlFlow = false) { 13423 if (pruneControlFlow) { 13424 S.DiagRuntimeBehavior(E->getExprLoc(), E, 13425 S.PDiag(diag) 13426 << SourceType << T << E->getSourceRange() 13427 << SourceRange(CContext)); 13428 return; 13429 } 13430 S.Diag(E->getExprLoc(), diag) 13431 << SourceType << T << E->getSourceRange() << SourceRange(CContext); 13432} 13433 13434/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
13435static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, 13436 SourceLocation CContext, 13437 unsigned diag, bool pruneControlFlow = false) { 13438 DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); 13439} 13440 13441static bool isObjCSignedCharBool(Sema &S, QualType Ty) { 13442 return Ty->isSpecificBuiltinType(BuiltinType::SChar) && 13443 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); 13444} 13445 13446static void adornObjCBoolConversionDiagWithTernaryFixit( 13447 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) { 13448 Expr *Ignored = SourceExpr->IgnoreImplicit(); 13449 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Ignored)) 13450 Ignored = OVE->getSourceExpr(); 13451 bool NeedsParens = isa<AbstractConditionalOperator>(Ignored) || 13452 isa<BinaryOperator>(Ignored) || 13453 isa<CXXOperatorCallExpr>(Ignored); 13454 SourceLocation EndLoc = S.getLocForEndOfToken(SourceExpr->getEndLoc()); 13455 if (NeedsParens) 13456 Builder << FixItHint::CreateInsertion(SourceExpr->getBeginLoc(), "(") 13457 << FixItHint::CreateInsertion(EndLoc, ")"); 13458 Builder << FixItHint::CreateInsertion(EndLoc, " ? YES : NO"); 13459} 13460 13461/// Diagnose an implicit cast from a floating point value to an integer value. 
static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
                                    SourceLocation CContext) {
  const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
  const bool PruneWarnings = S.inTemplateInstantiation();

  Expr *InnerE = E->IgnoreParenImpCasts();
  // We also want to warn on, e.g., "int i = -1.234"
  if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
    if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
      InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();

  const bool IsLiteral =
      isa<FloatingLiteral>(E) || isa<FloatingLiteral>(InnerE);

  llvm::APFloat Value(0.0);
  bool IsConstant =
      E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects);
  // Non-constant source: we can only issue the generic float-to-integer
  // (or float-to-ObjC-BOOL) warning, without value information.
  if (!IsConstant) {
    if (isObjCSignedCharBool(S, T)) {
      return adornObjCBoolConversionDiagWithTernaryFixit(
          S, E,
          S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
              << E->getType());
    }

    return DiagnoseImpCast(S, E, T, CContext,
                           diag::warn_impcast_float_integer, PruneWarnings);
  }

  bool isExact = false;

  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  llvm::APFloat::opStatus Result = Value.convertToInteger(
      IntegerValue, llvm::APFloat::rmTowardZero, &isExact);

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  // 59/196 ~= log10(2): convert bits of precision to decimal digits
  // (rounding up) for printing.
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  if (isObjCSignedCharBool(S, T) && IntegerValue != 0 && IntegerValue != 1) {
    return adornObjCBoolConversionDiagWithTernaryFixit(
        S, E,
        S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
            << PrettySourceValue);
  }

  // The value converts exactly: only the generic warning applies, and
  // literals that convert exactly are not worth warning about at all.
  if (Result == llvm::APFloat::opOK && isExact) {
    if (IsLiteral) return;
    return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
                           PruneWarnings);
  }

  // Conversion of a floating-point value to a non-bool integer where the
  // integral part cannot be represented by the integer type is undefined.
  if (!IsBool && Result == llvm::APFloat::opInvalidOp)
    return DiagnoseImpCast(
        S, E, T, CContext,
        IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
                  : diag::warn_impcast_float_to_integer_out_of_range,
        PruneWarnings);

  // Inexact but representable conversion: pick the most specific diagnostic.
  unsigned DiagID = 0;
  if (IsLiteral) {
    // Warn on floating point literal to integer.
    DiagID = diag::warn_impcast_literal_float_to_integer;
  } else if (IntegerValue == 0) {
    if (Value.isZero()) { // Skip -0.0 to 0 conversion.
      return DiagnoseImpCast(S, E, T, CContext,
                             diag::warn_impcast_float_integer, PruneWarnings);
    }
    // Warn on non-zero to zero conversion.
    DiagID = diag::warn_impcast_float_to_integer_zero;
  } else {
    // A result saturated at the integer type's extreme gets the stronger
    // value-printing diagnostic; anything else gets the generic one.
    if (IntegerValue.isUnsigned()) {
      if (!IntegerValue.isMaxValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    } else { // IntegerValue.isSigned()
      if (!IntegerValue.isMaxSignedValue() &&
          !IntegerValue.isMinSignedValue()) {
        return DiagnoseImpCast(S, E, T, CContext,
                               diag::warn_impcast_float_integer, PruneWarnings);
      }
    }
    // Warn on evaluatable floating point expression to integer conversion.
    DiagID = diag::warn_impcast_float_to_integer;
  }

  SmallString<16> PrettyTargetValue;
  if (IsBool)
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  if (PruneWarnings) {
    S.DiagRuntimeBehavior(E->getExprLoc(), E,
                          S.PDiag(DiagID)
                              << E->getType() << T.getUnqualifiedType()
                              << PrettySourceValue << PrettyTargetValue
                              << E->getSourceRange() << SourceRange(CContext));
  } else {
    S.Diag(E->getExprLoc(), DiagID)
        << E->getType() << T.getUnqualifiedType() << PrettySourceValue
        << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
  }
}

/// Analyze the given compound assignment for the possible losing of
/// floating-point precision.
static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
  assert(isa<CompoundAssignOperator>(E) &&
         "Must be compound assignment operation");
  // Recurse on the LHS and RHS in here
  AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
  AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());

  // Diagnose implicitly sequentially-consistent atomic compound assignment.
  if (E->getLHS()->getType()->isAtomicType())
    S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);

  // Now check the outermost expression
  const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
  const auto *RBT = cast<CompoundAssignOperator>(E)
                        ->getComputationResultType()
                        ->getAs<BuiltinType>();

  // The below checks assume source is floating point.
  if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;

  // If source is floating point but target is an integer.
  if (ResultBT->isInteger())
    return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
                           E->getExprLoc(), diag::warn_impcast_float_integer);

  if (!ResultBT->isFloatingPoint())
    return;

  // If both source and target are floating points, warn about losing precision.
  int Order = S.getASTContext().getFloatingTypeSemanticOrder(
      QualType(ResultBT, 0), QualType(RBT, 0));
  if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
    // warn about dropping FP rank.
    DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
                    diag::warn_impcast_float_result_precision);
}

/// Print \p Value as it would appear after being truncated/reinterpreted to
/// fit in \p Range (used to show the stored value in precision diagnostics).
static std::string PrettyPrintInRange(const llvm::APSInt &Value,
                                      IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return toString(ValueInRange, 10);
}

/// Does \p Ex contain an implicit floating-point-to-bool conversion
/// (\p ToBool true) or bool-to-floating-point conversion (\p ToBool false)?
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

/// Warn when a call argument undergoes a float-to-bool conversion while an
/// adjacent argument undergoes the reverse, which suggests swapped arguments.
static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                             SourceLocation CC) {
  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    // Look for the opposite conversion on either neighboring argument.
    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
                      CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

/// Diagnose an implicit conversion of NULL or nullptr to an integer type,
/// with a fix-it suggesting the zero literal for the target type.
static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Don't warn on functions which have return type nullptr_t.
  if (isa<CallExpr>(E))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
  const Expr *NewE = E->IgnoreParenImpCasts();
  bool IsGNUNullExpr = isa<GNUNullExpr>(NewE);
  bool HasNullPtrType = NewE->getType()->isNullPtrType();
  if (!IsGNUNullExpr && !HasNullPtrType)
    return;

  // Return if target type is a safe conversion.
  if (T->isAnyPointerType() || T->isBlockPointerType() ||
      T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
    return;

  SourceLocation Loc = E->getSourceRange().getBegin();

  // Venture through the macro stacks to get to the source of macro arguments.
  // The new location is a better location than the complete location that was
  // passed in.
  Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
  CC = S.SourceMgr.getTopMacroCallerLoc(CC);

  // __null is usually wrapped in a macro. Go up a macro if that is the case.
  if (IsGNUNullExpr && Loc.isMacroID()) {
    StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
        Loc, S.SourceMgr, S.getLangOpts());
    if (MacroName == "NULL")
      Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
  }

  // Only warn if the null and context location are in the same macro expansion.
  if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC))
    return;

  S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
      << HasNullPtrType << T << SourceRange(CC)
      << FixItHint::CreateReplacement(Loc,
                                      S.getFixItZeroLiteralForType(T, Loc));
}

static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
                                  ObjCArrayLiteral *ArrayLiteral);

static void
checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
                           ObjCDictionaryLiteral *DictionaryLiteral);

/// Check a single element within a collection literal against the
/// target element type.
static void checkObjCCollectionLiteralElement(Sema &S,
                                              QualType TargetElementType,
                                              Expr *Element,
                                              unsigned ElementKind) {
  // Skip a bitcast to 'id' or qualified 'id'.
  if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) {
    if (ICE->getCastKind() == CK_BitCast &&
        ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
      Element = ICE->getSubExpr();
  }

  QualType ElementType = Element->getType();
  ExprResult ElementResult(Element);
  if (ElementType->getAs<ObjCObjectPointerType>() &&
      S.CheckSingleAssignmentConstraints(TargetElementType,
                                         ElementResult,
                                         false, false)
        != Sema::Compatible) {
    S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
        << ElementType << ElementKind << TargetElementType
        << Element->getSourceRange();
  }

  // Recurse into nested collection literals.
  if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element))
    checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral);
  else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element))
    checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral);
}

/// Check an Objective-C array literal being converted to the given
/// target type.
13753static void checkObjCArrayLiteral(Sema &S, QualType TargetType, 13754 ObjCArrayLiteral *ArrayLiteral) { 13755 if (!S.NSArrayDecl) 13756 return; 13757 13758 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13759 if (!TargetObjCPtr) 13760 return; 13761 13762 if (TargetObjCPtr->isUnspecialized() || 13763 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13764 != S.NSArrayDecl->getCanonicalDecl()) 13765 return; 13766 13767 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13768 if (TypeArgs.size() != 1) 13769 return; 13770 13771 QualType TargetElementType = TypeArgs[0]; 13772 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { 13773 checkObjCCollectionLiteralElement(S, TargetElementType, 13774 ArrayLiteral->getElement(I), 13775 0); 13776 } 13777} 13778 13779/// Check an Objective-C dictionary literal being converted to the given 13780/// target type. 13781static void 13782checkObjCDictionaryLiteral(Sema &S, QualType TargetType, 13783 ObjCDictionaryLiteral *DictionaryLiteral) { 13784 if (!S.NSDictionaryDecl) 13785 return; 13786 13787 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); 13788 if (!TargetObjCPtr) 13789 return; 13790 13791 if (TargetObjCPtr->isUnspecialized() || 13792 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() 13793 != S.NSDictionaryDecl->getCanonicalDecl()) 13794 return; 13795 13796 auto TypeArgs = TargetObjCPtr->getTypeArgs(); 13797 if (TypeArgs.size() != 2) 13798 return; 13799 13800 QualType TargetKeyType = TypeArgs[0]; 13801 QualType TargetObjectType = TypeArgs[1]; 13802 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { 13803 auto Element = DictionaryLiteral->getKeyValueElement(I); 13804 checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); 13805 checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); 13806 } 13807} 13808 13809// Helper function to filter out cases for constant width constant conversion. 
// Don't warn on char array initialization or for non-decimal values.
static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
                                          SourceLocation CC) {
  // If initializing from a constant, and the constant starts with '0',
  // then it is a binary, octal, or hexadecimal. Allow these constants
  // to fill all the bits, even if there is a sign change.
  if (auto *IntLit = dyn_cast<IntegerLiteral>(E->IgnoreParenImpCasts())) {
    const char FirstLiteralCharacter =
        S.getSourceManager().getCharacterData(IntLit->getBeginLoc())[0];
    if (FirstLiteralCharacter == '0')
      return false;
  }

  // If the CC location points to a '{', and the type is char, then assume
  // it is an array initialization.
  if (CC.isValid() && T->isCharType()) {
    const char FirstContextCharacter =
        S.getSourceManager().getCharacterData(CC)[0];
    if (FirstContextCharacter == '{')
      return false;
  }

  return true;
}

/// Return \p E as an IntegerLiteral, looking through a unary minus; otherwise
/// return null.
static const IntegerLiteral *getIntegerLiteral(Expr *E) {
  const auto *IL = dyn_cast<IntegerLiteral>(E);
  if (!IL) {
    if (auto *UO = dyn_cast<UnaryOperator>(E)) {
      if (UO->getOpcode() == UO_Minus)
        return dyn_cast<IntegerLiteral>(UO->getSubExpr());
    }
  }

  return IL;
}

/// Diagnose integer expressions (shifts, conditional operators) whose result
/// is used in a boolean context, where the integral value is suspicious.
static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
  E = E->IgnoreParenImpCasts();
  SourceLocation ExprLoc = E->getExprLoc();

  if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
    BinaryOperator::Opcode Opc = BO->getOpcode();
    Expr::EvalResult Result;
    // Do not diagnose unsigned shifts.
13855 if (Opc == BO_Shl) { 13856 const auto *LHS = getIntegerLiteral(BO->getLHS()); 13857 const auto *RHS = getIntegerLiteral(BO->getRHS()); 13858 if (LHS && LHS->getValue() == 0) 13859 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0; 13860 else if (!E->isValueDependent() && LHS && RHS && 13861 RHS->getValue().isNonNegative() && 13862 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) 13863 S.Diag(ExprLoc, diag::warn_left_shift_always) 13864 << (Result.Val.getInt() != 0); 13865 else if (E->getType()->isSignedIntegerType()) 13866 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E; 13867 } 13868 } 13869 13870 if (const auto *CO = dyn_cast<ConditionalOperator>(E)) { 13871 const auto *LHS = getIntegerLiteral(CO->getTrueExpr()); 13872 const auto *RHS = getIntegerLiteral(CO->getFalseExpr()); 13873 if (!LHS || !RHS) 13874 return; 13875 if ((LHS->getValue() == 0 || LHS->getValue() == 1) && 13876 (RHS->getValue() == 0 || RHS->getValue() == 1)) 13877 // Do not diagnose common idioms. 13878 return; 13879 if (LHS->getValue() != 0 && RHS->getValue() != 0) 13880 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true); 13881 } 13882} 13883 13884static void CheckImplicitConversion(Sema &S, Expr *E, QualType T, 13885 SourceLocation CC, 13886 bool *ICContext = nullptr, 13887 bool IsListInit = false) { 13888 if (E->isTypeDependent() || E->isValueDependent()) return; 13889 13890 const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); 13891 const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); 13892 if (Source == Target) return; 13893 if (Target->isDependentType()) return; 13894 13895 // If the conversion context location is invalid don't complain. We also 13896 // don't want to emit a warning if the issue occurs from the expansion of 13897 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we 13898 // delay this check as long as possible. 
Once we detect we are in that 13899 // scenario, we just return. 13900 if (CC.isInvalid()) 13901 return; 13902 13903 if (Source->isAtomicType()) 13904 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst); 13905 13906 // Diagnose implicit casts to bool. 13907 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { 13908 if (isa<StringLiteral>(E)) 13909 // Warn on string literal to bool. Checks for string literals in logical 13910 // and expressions, for instance, assert(0 && "error here"), are 13911 // prevented by a check in AnalyzeImplicitConversions(). 13912 return DiagnoseImpCast(S, E, T, CC, 13913 diag::warn_impcast_string_literal_to_bool); 13914 if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || 13915 isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { 13916 // This covers the literal expressions that evaluate to Objective-C 13917 // objects. 13918 return DiagnoseImpCast(S, E, T, CC, 13919 diag::warn_impcast_objective_c_literal_to_bool); 13920 } 13921 if (Source->isPointerType() || Source->canDecayToPointerType()) { 13922 // Warn on pointer to bool conversion that is always true. 13923 S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, 13924 SourceRange(CC)); 13925 } 13926 } 13927 13928 // If the we're converting a constant to an ObjC BOOL on a platform where BOOL 13929 // is a typedef for signed char (macOS), then that constant value has to be 1 13930 // or 0. 
13931 if (isObjCSignedCharBool(S, T) && Source->isIntegralType(S.Context)) { 13932 Expr::EvalResult Result; 13933 if (E->EvaluateAsInt(Result, S.getASTContext(), 13934 Expr::SE_AllowSideEffects)) { 13935 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) { 13936 adornObjCBoolConversionDiagWithTernaryFixit( 13937 S, E, 13938 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool) 13939 << toString(Result.Val.getInt(), 10)); 13940 } 13941 return; 13942 } 13943 } 13944 13945 // Check implicit casts from Objective-C collection literals to specialized 13946 // collection types, e.g., NSArray<NSString *> *. 13947 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) 13948 checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); 13949 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) 13950 checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); 13951 13952 // Strip vector types. 13953 if (isa<VectorType>(Source)) { 13954 if (Target->isVLSTBuiltinType() && 13955 (S.Context.areCompatibleSveTypes(QualType(Target, 0), 13956 QualType(Source, 0)) || 13957 S.Context.areLaxCompatibleSveTypes(QualType(Target, 0), 13958 QualType(Source, 0)))) 13959 return; 13960 13961 if (!isa<VectorType>(Target)) { 13962 if (S.SourceMgr.isInSystemMacro(CC)) 13963 return; 13964 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); 13965 } 13966 13967 // If the vector cast is cast between two vectors of the same size, it is 13968 // a bitcast, not a conversion. 13969 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 13970 return; 13971 13972 Source = cast<VectorType>(Source)->getElementType().getTypePtr(); 13973 Target = cast<VectorType>(Target)->getElementType().getTypePtr(); 13974 } 13975 if (auto VecTy = dyn_cast<VectorType>(Target)) 13976 Target = VecTy->getElementType().getTypePtr(); 13977 13978 // Strip complex types. 
13979 if (isa<ComplexType>(Source)) { 13980 if (!isa<ComplexType>(Target)) { 13981 if (S.SourceMgr.isInSystemMacro(CC) || Target->isBooleanType()) 13982 return; 13983 13984 return DiagnoseImpCast(S, E, T, CC, 13985 S.getLangOpts().CPlusPlus 13986 ? diag::err_impcast_complex_scalar 13987 : diag::warn_impcast_complex_scalar); 13988 } 13989 13990 Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); 13991 Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); 13992 } 13993 13994 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); 13995 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); 13996 13997 // Strip SVE vector types 13998 if (SourceBT && SourceBT->isVLSTBuiltinType()) { 13999 // Need the original target type for vector type checks 14000 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr(); 14001 // Handle conversion from scalable to fixed when msve-vector-bits is 14002 // specified 14003 if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0), 14004 QualType(Source, 0)) || 14005 S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0), 14006 QualType(Source, 0))) 14007 return; 14008 14009 // If the vector cast is cast between two vectors of the same size, it is 14010 // a bitcast, not a conversion. 14011 if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) 14012 return; 14013 14014 Source = SourceBT->getSveEltType(S.Context).getTypePtr(); 14015 } 14016 14017 if (TargetBT && TargetBT->isVLSTBuiltinType()) 14018 Target = TargetBT->getSveEltType(S.Context).getTypePtr(); 14019 14020 // If the source is floating point... 14021 if (SourceBT && SourceBT->isFloatingPoint()) { 14022 // ...and the target is floating point... 14023 if (TargetBT && TargetBT->isFloatingPoint()) { 14024 // ...then warn if we're dropping FP rank. 
14025 14026 int Order = S.getASTContext().getFloatingTypeSemanticOrder( 14027 QualType(SourceBT, 0), QualType(TargetBT, 0)); 14028 if (Order > 0) { 14029 // Don't warn about float constants that are precisely 14030 // representable in the target type. 14031 Expr::EvalResult result; 14032 if (E->EvaluateAsRValue(result, S.Context)) { 14033 // Value might be a float, a float vector, or a float complex. 14034 if (IsSameFloatAfterCast(result.Val, 14035 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), 14036 S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) 14037 return; 14038 } 14039 14040 if (S.SourceMgr.isInSystemMacro(CC)) 14041 return; 14042 14043 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); 14044 } 14045 // ... or possibly if we're increasing rank, too 14046 else if (Order < 0) { 14047 if (S.SourceMgr.isInSystemMacro(CC)) 14048 return; 14049 14050 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); 14051 } 14052 return; 14053 } 14054 14055 // If the target is integral, always warn. 14056 if (TargetBT && TargetBT->isInteger()) { 14057 if (S.SourceMgr.isInSystemMacro(CC)) 14058 return; 14059 14060 DiagnoseFloatingImpCast(S, E, T, CC); 14061 } 14062 14063 // Detect the case where a call result is converted from floating-point to 14064 // to bool, and the final argument to the call is converted from bool, to 14065 // discover this typo: 14066 // 14067 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;" 14068 // 14069 // FIXME: This is an incredibly special case; is there some more general 14070 // way to detect this class of misplaced-parentheses bug? 14071 if (Target->isBooleanType() && isa<CallExpr>(E)) { 14072 // Check last argument of function call to see if it is an 14073 // implicit cast from a type matching the type the result 14074 // is being cast to. 
14075 CallExpr *CEx = cast<CallExpr>(E); 14076 if (unsigned NumArgs = CEx->getNumArgs()) { 14077 Expr *LastA = CEx->getArg(NumArgs - 1); 14078 Expr *InnerE = LastA->IgnoreParenImpCasts(); 14079 if (isa<ImplicitCastExpr>(LastA) && 14080 InnerE->getType()->isBooleanType()) { 14081 // Warn on this floating-point to bool conversion 14082 DiagnoseImpCast(S, E, T, CC, 14083 diag::warn_impcast_floating_point_to_bool); 14084 } 14085 } 14086 } 14087 return; 14088 } 14089 14090 // Valid casts involving fixed point types should be accounted for here. 14091 if (Source->isFixedPointType()) { 14092 if (Target->isUnsaturatedFixedPointType()) { 14093 Expr::EvalResult Result; 14094 if (E->EvaluateAsFixedPoint(Result, S.Context, Expr::SE_AllowSideEffects, 14095 S.isConstantEvaluated())) { 14096 llvm::APFixedPoint Value = Result.Val.getFixedPoint(); 14097 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(T); 14098 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(T); 14099 if (Value > MaxVal || Value < MinVal) { 14100 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14101 S.PDiag(diag::warn_impcast_fixed_point_range) 14102 << Value.toString() << T 14103 << E->getSourceRange() 14104 << clang::SourceRange(CC)); 14105 return; 14106 } 14107 } 14108 } else if (Target->isIntegerType()) { 14109 Expr::EvalResult Result; 14110 if (!S.isConstantEvaluated() && 14111 E->EvaluateAsFixedPoint(Result, S.Context, 14112 Expr::SE_AllowSideEffects)) { 14113 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint(); 14114 14115 bool Overflowed; 14116 llvm::APSInt IntResult = FXResult.convertToInt( 14117 S.Context.getIntWidth(T), 14118 Target->isSignedIntegerOrEnumerationType(), &Overflowed); 14119 14120 if (Overflowed) { 14121 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14122 S.PDiag(diag::warn_impcast_fixed_point_range) 14123 << FXResult.toString() << T 14124 << E->getSourceRange() 14125 << clang::SourceRange(CC)); 14126 return; 14127 } 14128 } 14129 } 14130 } else if 
(Target->isUnsaturatedFixedPointType()) { 14131 if (Source->isIntegerType()) { 14132 Expr::EvalResult Result; 14133 if (!S.isConstantEvaluated() && 14134 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects)) { 14135 llvm::APSInt Value = Result.Val.getInt(); 14136 14137 bool Overflowed; 14138 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue( 14139 Value, S.Context.getFixedPointSemantics(T), &Overflowed); 14140 14141 if (Overflowed) { 14142 S.DiagRuntimeBehavior(E->getExprLoc(), E, 14143 S.PDiag(diag::warn_impcast_fixed_point_range) 14144 << toString(Value, /*Radix=*/10) << T 14145 << E->getSourceRange() 14146 << clang::SourceRange(CC)); 14147 return; 14148 } 14149 } 14150 } 14151 } 14152 14153 // If we are casting an integer type to a floating point type without 14154 // initialization-list syntax, we might lose accuracy if the floating 14155 // point type has a narrower significand than the integer type. 14156 if (SourceBT && TargetBT && SourceBT->isIntegerType() && 14157 TargetBT->isFloatingType() && !IsListInit) { 14158 // Determine the number of precision bits in the source integer type. 14159 IntRange SourceRange = GetExprRange(S.Context, E, S.isConstantEvaluated(), 14160 /*Approximate*/ true); 14161 unsigned int SourcePrecision = SourceRange.Width; 14162 14163 // Determine the number of precision bits in the 14164 // target floating point type. 14165 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision( 14166 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14167 14168 if (SourcePrecision > 0 && TargetPrecision > 0 && 14169 SourcePrecision > TargetPrecision) { 14170 14171 if (std::optional<llvm::APSInt> SourceInt = 14172 E->getIntegerConstantExpr(S.Context)) { 14173 // If the source integer is a constant, convert it to the target 14174 // floating point type. Issue a warning if the value changes 14175 // during the whole conversion. 
14176 llvm::APFloat TargetFloatValue( 14177 S.Context.getFloatTypeSemantics(QualType(TargetBT, 0))); 14178 llvm::APFloat::opStatus ConversionStatus = 14179 TargetFloatValue.convertFromAPInt( 14180 *SourceInt, SourceBT->isSignedInteger(), 14181 llvm::APFloat::rmNearestTiesToEven); 14182 14183 if (ConversionStatus != llvm::APFloat::opOK) { 14184 SmallString<32> PrettySourceValue; 14185 SourceInt->toString(PrettySourceValue, 10); 14186 SmallString<32> PrettyTargetValue; 14187 TargetFloatValue.toString(PrettyTargetValue, TargetPrecision); 14188 14189 S.DiagRuntimeBehavior( 14190 E->getExprLoc(), E, 14191 S.PDiag(diag::warn_impcast_integer_float_precision_constant) 14192 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14193 << E->getSourceRange() << clang::SourceRange(CC)); 14194 } 14195 } else { 14196 // Otherwise, the implicit conversion may lose precision. 14197 DiagnoseImpCast(S, E, T, CC, 14198 diag::warn_impcast_integer_float_precision); 14199 } 14200 } 14201 } 14202 14203 DiagnoseNullConversion(S, E, T, CC); 14204 14205 S.DiscardMisalignedMemberAddress(Target, E); 14206 14207 if (Target->isBooleanType()) 14208 DiagnoseIntInBoolContext(S, E); 14209 14210 if (!Source->isIntegerType() || !Target->isIntegerType()) 14211 return; 14212 14213 // TODO: remove this early return once the false positives for constant->bool 14214 // in templates, macros, etc, are reduced or removed. 
14215 if (Target->isSpecificBuiltinType(BuiltinType::Bool)) 14216 return; 14217 14218 if (isObjCSignedCharBool(S, T) && !Source->isCharType() && 14219 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) { 14220 return adornObjCBoolConversionDiagWithTernaryFixit( 14221 S, E, 14222 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool) 14223 << E->getType()); 14224 } 14225 14226 IntRange SourceTypeRange = 14227 IntRange::forTargetOfCanonicalType(S.Context, Source); 14228 IntRange LikelySourceRange = 14229 GetExprRange(S.Context, E, S.isConstantEvaluated(), /*Approximate*/ true); 14230 IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target); 14231 14232 if (LikelySourceRange.Width > TargetRange.Width) { 14233 // If the source is a constant, use a default-on diagnostic. 14234 // TODO: this should happen for bitfield stores, too. 14235 Expr::EvalResult Result; 14236 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects, 14237 S.isConstantEvaluated())) { 14238 llvm::APSInt Value(32); 14239 Value = Result.Val.getInt(); 14240 14241 if (S.SourceMgr.isInSystemMacro(CC)) 14242 return; 14243 14244 std::string PrettySourceValue = toString(Value, 10); 14245 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 14246 14247 S.DiagRuntimeBehavior( 14248 E->getExprLoc(), E, 14249 S.PDiag(diag::warn_impcast_integer_precision_constant) 14250 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14251 << E->getSourceRange() << SourceRange(CC)); 14252 return; 14253 } 14254 14255 // People want to build with -Wshorten-64-to-32 and not -Wconversion. 
14256 if (S.SourceMgr.isInSystemMacro(CC)) 14257 return; 14258 14259 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64) 14260 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32, 14261 /* pruneControlFlow */ true); 14262 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision); 14263 } 14264 14265 if (TargetRange.Width > SourceTypeRange.Width) { 14266 if (auto *UO = dyn_cast<UnaryOperator>(E)) 14267 if (UO->getOpcode() == UO_Minus) 14268 if (Source->isUnsignedIntegerType()) { 14269 if (Target->isUnsignedIntegerType()) 14270 return DiagnoseImpCast(S, E, T, CC, 14271 diag::warn_impcast_high_order_zero_bits); 14272 if (Target->isSignedIntegerType()) 14273 return DiagnoseImpCast(S, E, T, CC, 14274 diag::warn_impcast_nonnegative_result); 14275 } 14276 } 14277 14278 if (TargetRange.Width == LikelySourceRange.Width && 14279 !TargetRange.NonNegative && LikelySourceRange.NonNegative && 14280 Source->isSignedIntegerType()) { 14281 // Warn when doing a signed to signed conversion, warn if the positive 14282 // source value is exactly the width of the target type, which will 14283 // cause a negative value to be stored. 14284 14285 Expr::EvalResult Result; 14286 if (E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects) && 14287 !S.SourceMgr.isInSystemMacro(CC)) { 14288 llvm::APSInt Value = Result.Val.getInt(); 14289 if (isSameWidthConstantConversion(S, E, T, CC)) { 14290 std::string PrettySourceValue = toString(Value, 10); 14291 std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange); 14292 14293 S.DiagRuntimeBehavior( 14294 E->getExprLoc(), E, 14295 S.PDiag(diag::warn_impcast_integer_precision_constant) 14296 << PrettySourceValue << PrettyTargetValue << E->getType() << T 14297 << E->getSourceRange() << SourceRange(CC)); 14298 return; 14299 } 14300 } 14301 14302 // Fall through for non-constants to give a sign conversion warning. 
14303 } 14304 14305 if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) && 14306 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) || 14307 (!TargetRange.NonNegative && LikelySourceRange.NonNegative && 14308 LikelySourceRange.Width == TargetRange.Width))) { 14309 if (S.SourceMgr.isInSystemMacro(CC)) 14310 return; 14311 14312 unsigned DiagID = diag::warn_impcast_integer_sign; 14313 14314 // Traditionally, gcc has warned about this under -Wsign-compare. 14315 // We also want to warn about it in -Wconversion. 14316 // So if -Wconversion is off, use a completely identical diagnostic 14317 // in the sign-compare group. 14318 // The conditional-checking code will 14319 if (ICContext) { 14320 DiagID = diag::warn_impcast_integer_sign_conditional; 14321 *ICContext = true; 14322 } 14323 14324 return DiagnoseImpCast(S, E, T, CC, DiagID); 14325 } 14326 14327 // Diagnose conversions between different enumeration types. 14328 // In C, we pretend that the type of an EnumConstantDecl is its enumeration 14329 // type, to give us better diagnostics. 
  QualType SourceType = E->getType();
  if (!S.getLangOpts().CPlusPlus) {
    // In C, rewrite an enumerator reference's source type to its enumeration
    // so the enum-mismatch diagnostic below can fire on enumerators too.
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
        EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
        SourceType = S.Context.getTypeDeclType(Enum);
        Source = S.Context.getCanonicalType(SourceType).getTypePtr();
      }
  }

  // Only warn for named (linkage-having) enums; converting between distinct
  // named enumeration types is likely a mistake.
  if (const EnumType *SourceEnum = Source->getAs<EnumType>())
    if (const EnumType *TargetEnum = Target->getAs<EnumType>())
      if (SourceEnum->getDecl()->hasNameForLinkage() &&
          TargetEnum->getDecl()->hasNameForLinkage() &&
          SourceEnum != TargetEnum) {
        if (S.SourceMgr.isInSystemMacro(CC))
          return;

        return DiagnoseImpCast(S, E, SourceType, T, CC,
                               diag::warn_impcast_different_enum_types);
      }
}

static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T);

/// Analyze one arm of a conditional operator: recurse into nested
/// conditionals; otherwise analyze the operand's own implicit conversions and
/// then its conversion to the context type \p T.  \p ICContext is set when a
/// suspicious signedness conversion is seen.
static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
                                    SourceLocation CC, bool &ICContext) {
  E = E->IgnoreParenImpCasts();

  if (auto *CO = dyn_cast<AbstractConditionalOperator>(E))
    return CheckConditionalOperator(S, CO, CC, T);

  AnalyzeImplicitConversions(S, E, CC);
  if (E->getType() != T)
    return CheckImplicitConversion(S, E, T, CC, &ICContext);
}

/// Analyze the condition and both arms of a conditional operator whose result
/// undergoes an implicit conversion to the context type \p T.
static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
                                     SourceLocation CC, QualType T) {
  AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc());

  // For the GNU binary conditional (x ?: y), the "true" arm is the shared
  // common expression.
  Expr *TrueExpr = E->getTrueExpr();
  if (auto *BCO = dyn_cast<BinaryConditionalOperator>(E))
    TrueExpr = BCO->getCommon();

  bool Suspicious = false;
  CheckConditionalOperand(S, TrueExpr, T, CC, Suspicious);
  CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);

  if (T->isBooleanType())
    DiagnoseIntInBoolContext(S, E);

  // If -Wconversion would have warned about either of the candidates
  // for a signedness conversion to the context type...
  if (!Suspicious) return;

  // ...but it's currently ignored...
  if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
    return;

  // ...then check whether it would have warned about either of the
  // candidates for a signedness conversion to the condition type.
  if (E->getType() == T) return;

  Suspicious = false;
  CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
                          E->getType(), CC, &Suspicious);
  if (!Suspicious)
    CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
                            E->getType(), CC, &Suspicious);
}

/// Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
  // Nothing to do when the language has a real 'bool' type.
  if (S.getLangOpts().Bool)
    return;
  // Skip atomic operands.
  if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
    return;
  CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
}

namespace {
/// A deferred visit for the data-recursive AnalyzeImplicitConversions:
/// expression E, checked against context location CC.
struct AnalyzeImplicitConversionsWorkItem {
  Expr *E;
  SourceLocation CC;
  bool IsListInit; // inside a C++ list-initialization expression
};
}

/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
/// that should be visited are added to WorkList.
static void AnalyzeImplicitConversions(
    Sema &S, AnalyzeImplicitConversionsWorkItem Item,
    llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
  Expr *OrigE = Item.E;
  SourceLocation CC = Item.CC;

  QualType T = OrigE->getType();
  Expr *E = OrigE->IgnoreParenImpCasts();

  // Propagate whether we are in a C++ list initialization expression.
  // If so, we do not issue warnings for implicit int-float conversion
  // precision loss, because C++11 narrowing already handles it.
  bool IsListInit = Item.IsListInit ||
                    (isa<InitListExpr>(OrigE) && S.getLangOpts().CPlusPlus);

  if (E->isTypeDependent() || E->isValueDependent())
    return;

  Expr *SourceExpr = E;
  // Examine, but don't traverse into the source expression of an
  // OpaqueValueExpr, since it may have multiple parents and we don't want to
  // emit duplicate diagnostics. It's fine to examine the form or attempt to
  // evaluate it in the context of checking the specific conversion to T though.
  if (auto *OVE = dyn_cast<OpaqueValueExpr>(E))
    if (auto *Src = OVE->getSourceExpr())
      SourceExpr = Src;

  // '~b' on a known-boolean operand almost certainly meant '!b'.
  if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
    if (UO->getOpcode() == UO_Not &&
        UO->getSubExpr()->isKnownToHaveBooleanValue())
      S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
          << OrigE->getSourceRange() << T->isBooleanType()
          << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");

  // 'a & b' / 'a | b' on known-boolean operands that both have side effects
  // likely meant the short-circuiting '&&' / '||'.
  if (const auto *BO = dyn_cast<BinaryOperator>(SourceExpr))
    if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
        BO->getLHS()->isKnownToHaveBooleanValue() &&
        BO->getRHS()->isKnownToHaveBooleanValue() &&
        BO->getLHS()->HasSideEffects(S.Context) &&
        BO->getRHS()->HasSideEffects(S.Context)) {
      S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
          << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
          << FixItHint::CreateReplacement(
                 BO->getOperatorLoc(),
                 (BO->getOpcode() == BO_And ? "&&" : "||"));
      S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
    }

  // For conditional operators, we analyze the arguments as if they
  // were being fed directly into the output.
  if (auto *CO = dyn_cast<AbstractConditionalOperator>(SourceExpr)) {
    CheckConditionalOperator(S, CO, CC, T);
    return;
  }

  // Check implicit argument conversions for function calls.
  if (CallExpr *Call = dyn_cast<CallExpr>(SourceExpr))
    CheckImplicitArgumentConversions(S, Call, CC);

  // Go ahead and check any implicit conversions we might have skipped.
  // The non-canonical typecheck is just an optimization;
  // CheckImplicitConversion will filter out dead implicit conversions.
  if (SourceExpr->getType() != T)
    CheckImplicitConversion(S, SourceExpr, T, CC, nullptr, IsListInit);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    // The bound subexpressions in a PseudoObjectExpr are not reachable
    // as transitive children.
    // FIXME: Use a more uniform representation for this.
    for (auto *SE : POE->semantics())
      if (auto *OVE = dyn_cast<OpaqueValueExpr>(SE))
        WorkList.push_back({OVE->getSourceExpr(), CC, IsListInit});
  }

  // Skip past explicit casts.
  if (auto *CE = dyn_cast<ExplicitCastExpr>(E)) {
    E = CE->getSubExpr()->IgnoreParenImpCasts();
    if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
      S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
    WorkList.push_back({E, CC, IsListInit});
    return;
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
    // And with compound assignments.
    if (BO->isAssignmentOp())
      return AnalyzeCompoundAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below.  Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
      if (ChildExpr == CSE->getOperand())
        // Do not recurse over a CoroutineSuspendExpr's operand.
        // The operand is also a subexpression of getCommonExpr(), and
        // recursing into it directly would produce duplicate diagnostics.
        continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    WorkList.push_back({ChildExpr, CC, IsListInit});
  }

  // The operands of logical operators convert to bool; check those
  // conversions here (string literals under '&&' are the assert pattern
  // skipped above).
  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
    if (U->getOpcode() == UO_LNot) {
      // '!' converts its operand to bool.
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
    } else if (U->getOpcode() != UO_AddrOf) {
      if (U->getSubExpr()->getType()->isAtomicType())
        S.Diag(U->getSubExpr()->getBeginLoc(),
               diag::warn_atomic_implicit_seq_cst);
    }
  }
}

/// AnalyzeImplicitConversions - Find and report any interesting
/// implicit conversions in the given expression.  There are a couple
/// of competing diagnostics here, -Wconversion and -Wsign-compare.
static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
                                       bool IsListInit/*= false*/) {
  // Drive the data-recursive worker with an explicit LIFO worklist to avoid
  // deep call-stack recursion on large expressions.
  llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
  WorkList.push_back({OrigE, CC, IsListInit});
  while (!WorkList.empty())
    AnalyzeImplicitConversions(S, WorkList.pop_back_val(), WorkList);
}

/// Diagnose integer type and any valid implicit conversion to it.
/// Returns true (after emitting a diagnostic) if \p E is not of integer type.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
  // Taking into account implicit conversions,
  // allow any integer.
  if (!E->getType()->isIntegerType()) {
    S.Diag(E->getBeginLoc(),
           diag::err_opencl_enqueue_kernel_invalid_local_size_type);
    return true;
  }
  // Potentially emit standard warnings for implicit conversions if enabled
  // using -Wconversion.
  CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
  return false;
}

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              const PartialDiagnostic &PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD) {
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
  }

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not in a macro
// expansion, or is expanded from a top-level macro argument.
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
  if (Loc.isInvalid())
    return false;

  // Walk up the chain of macro expansions looking for a body expansion.
  while (Loc.isMacroID()) {
    if (SM.isMacroBodyExpansion(Loc))
      return true;
    Loc = SM.getImmediateMacroCallerLoc(Loc);
  }

  return false;
}

/// Diagnose pointers that are always non-null.
/// \param E the expression containing the pointer
/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
/// compared to a null pointer
/// \param IsEqual True when the comparison is equal to a null pointer
/// \param Range Extra SourceRange to highlight in the diagnostic
void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
                                        Expr::NullPointerConstantKind NullKind,
                                        bool IsEqual, SourceRange Range) {
  if (!E)
    return;

  // Don't warn inside macros.
  if (E->getExprLoc().isMacroID()) {
    const SourceManager &SM = getSourceManager();
    if (IsInAnyMacroBody(SM, E->getExprLoc()) ||
        IsInAnyMacroBody(SM, Range.getBegin()))
      return;
  }
  E = E->IgnoreImpCasts();

  const bool IsCompare = NullKind != Expr::NPCK_NotNull;

  // 'this' is never null.
  if (isa<CXXThisExpr>(E)) {
    unsigned DiagID = IsCompare ? diag::warn_this_null_compare
                                : diag::warn_this_bool_conversion;
    Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
    return;
  }

  bool IsAddressOf = false;

  if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() != UO_AddrOf)
      return;
    IsAddressOf = true;
    E = UO->getSubExpr();
  }

  // '&ref' where 'ref' is a reference is always non-null.
  if (IsAddressOf) {
    unsigned DiagID = IsCompare
                          ? diag::warn_address_of_reference_null_compare
                          : diag::warn_address_of_reference_bool_conversion;
    PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
                                         << IsEqual;
    if (CheckForReference(*this, E, PD)) {
      return;
    }
  }

  // Emit the main warning plus a note pointing at the nonnull/returns_nonnull
  // attribute that justifies it.
  auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
    bool IsParam = isa<NonNullAttr>(NonnullAttr);
    std::string Str;
    llvm::raw_string_ostream S(Str);
    E->printPretty(S, nullptr, getPrintingPolicy());
    unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
                                : diag::warn_cast_nonnull_to_bool;
    Diag(E->getExprLoc(), DiagID) << IsParam << S.str()
                                  << E->getSourceRange() << Range << IsEqual;
    Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
  };

  // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
  if (auto *Call = dyn_cast<CallExpr>(E->IgnoreParenImpCasts())) {
    if (auto *Callee = Call->getDirectCallee()) {
      if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }
    }
  }

  // Expect to find a single Decl. Skip anything more complicated.
  ValueDecl *D = nullptr;
  if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) {
    D = R->getDecl();
  } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    D = M->getMemberDecl();
  }

  // Weak Decls can be null.
  if (!D || D->isWeak())
    return;

  // Check for parameter decl with nonnull attribute
  if (const auto* PV = dyn_cast<ParmVarDecl>(D)) {
    // Only warn if the parameter hasn't been reassigned in this function.
    if (getCurFunction() &&
        !getCurFunction()->ModifiedNonNullParams.count(PV)) {
      // The attribute may sit on the parameter itself...
      if (const Attr *A = PV->getAttr<NonNullAttr>()) {
        ComplainAboutNonnullParamOrCall(A);
        return;
      }

      // ...or on the enclosing function, naming parameters by index.
      if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
        // Skip function template not specialized yet.
        if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
          return;
        auto ParamIter = llvm::find(FD->parameters(), PV);
        assert(ParamIter != FD->param_end());
        unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);

        for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
          // An argument-less nonnull attribute covers every pointer param.
          if (!NonNull->args_size()) {
            ComplainAboutNonnullParamOrCall(NonNull);
            return;
          }

          for (const ParamIdx &ArgNo : NonNull->args()) {
            if (ArgNo.getASTIndex() == ParamNo) {
              ComplainAboutNonnullParamOrCall(NonNull);
              return;
            }
          }
        }
      }
    }
  }

  QualType T = D->getType();
  const bool IsArray = T->isArrayType();
  const bool IsFunction = T->isFunctionType();

  // Address of function is used to silence the function warning.
  if (IsAddressOf && IsFunction) {
    return;
  }

  // Found nothing.
  if (!IsAddressOf && !IsFunction && !IsArray)
    return;

  // Pretty print the expression for the diagnostic.
  std::string Str;
  llvm::raw_string_ostream S(Str);
  E->printPretty(S, nullptr, getPrintingPolicy());

  unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
                              : diag::warn_impcast_pointer_to_bool;
  enum {
    AddressOf,
    FunctionPointer,
    ArrayPointer
  } DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  // The remaining fix-its only apply to the function case.
  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getBeginLoc(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here.  If there is a null constant, only suggest
    // for a pointer return type.  If the null is 0, then suggest if the return
    // type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression).  Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e.
/// the location of the syntactic entity requiring the implicit
/// conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  // Delegate to the file-static helper of the same name.
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when expression is an integer constant expression and its
/// evaluation results in integer overflow.
void Sema::CheckForIntOverflow (Expr *E) {
  // Use a work list to deal with nested struct initializers.
  SmallVector<Expr *, 2> Exprs(1, E);

  do {
    Expr *OriginalE = Exprs.pop_back_val();
    // NB: deliberately shadows the parameter; 'E' below is the current
    // work-list item stripped of parens and casts.
    Expr *E = OriginalE->IgnoreParenCasts();

    if (isa<BinaryOperator>(E)) {
      E->EvaluateForOverflow(Context);
      continue;
    }

    // Otherwise, queue any interesting subexpressions for later evaluation.
    if (auto InitList = dyn_cast<InitListExpr>(OriginalE))
      Exprs.append(InitList->inits().begin(), InitList->inits().end());
    else if (isa<ObjCBoxedExpr>(OriginalE))
      E->EvaluateForOverflow(Context);
    else if (auto Call = dyn_cast<CallExpr>(E))
      Exprs.append(Call->arg_begin(), Call->arg_end());
    else if (auto Message = dyn_cast<ObjCMessageExpr>(E))
      Exprs.append(Message->arg_begin(), Message->arg_end());
    else if (auto Construct = dyn_cast<CXXConstructExpr>(E))
      Exprs.append(Construct->arg_begin(), Construct->arg_end());
    else if (auto Array = dyn_cast<ArraySubscriptExpr>(E))
      Exprs.push_back(Array->getIdx());
    else if (auto Compound = dyn_cast<CompoundLiteralExpr>(E))
Exprs.push_back(Compound->getInitializer()); 14900 else if (auto New = dyn_cast<CXXNewExpr>(E)) { 14901 if (New->isArray()) 14902 if (auto ArraySize = New->getArraySize()) 14903 Exprs.push_back(*ArraySize); 14904 } 14905 } while (!Exprs.empty()); 14906} 14907 14908namespace { 14909 14910/// Visitor for expressions which looks for unsequenced operations on the 14911/// same object. 14912class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> { 14913 using Base = ConstEvaluatedExprVisitor<SequenceChecker>; 14914 14915 /// A tree of sequenced regions within an expression. Two regions are 14916 /// unsequenced if one is an ancestor or a descendent of the other. When we 14917 /// finish processing an expression with sequencing, such as a comma 14918 /// expression, we fold its tree nodes into its parent, since they are 14919 /// unsequenced with respect to nodes we will visit later. 14920 class SequenceTree { 14921 struct Value { 14922 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} 14923 unsigned Parent : 31; 14924 unsigned Merged : 1; 14925 }; 14926 SmallVector<Value, 8> Values; 14927 14928 public: 14929 /// A region within an expression which may be sequenced with respect 14930 /// to some other region. 14931 class Seq { 14932 friend class SequenceTree; 14933 14934 unsigned Index; 14935 14936 explicit Seq(unsigned N) : Index(N) {} 14937 14938 public: 14939 Seq() : Index(0) {} 14940 }; 14941 14942 SequenceTree() { Values.push_back(Value(0)); } 14943 Seq root() const { return Seq(0); } 14944 14945 /// Create a new sequence of operations, which is an unsequenced 14946 /// subset of \p Parent. This sequence of operations is sequenced with 14947 /// respect to other children of \p Parent. 14948 Seq allocate(Seq Parent) { 14949 Values.push_back(Value(Parent.Index)); 14950 return Seq(Values.size() - 1); 14951 } 14952 14953 /// Merge a sequence of operations into its parent. 
14954 void merge(Seq S) { 14955 Values[S.Index].Merged = true; 14956 } 14957 14958 /// Determine whether two operations are unsequenced. This operation 14959 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old 14960 /// should have been merged into its parent as appropriate. 14961 bool isUnsequenced(Seq Cur, Seq Old) { 14962 unsigned C = representative(Cur.Index); 14963 unsigned Target = representative(Old.Index); 14964 while (C >= Target) { 14965 if (C == Target) 14966 return true; 14967 C = Values[C].Parent; 14968 } 14969 return false; 14970 } 14971 14972 private: 14973 /// Pick a representative for a sequence. 14974 unsigned representative(unsigned K) { 14975 if (Values[K].Merged) 14976 // Perform path compression as we go. 14977 return Values[K].Parent = representative(Values[K].Parent); 14978 return K; 14979 } 14980 }; 14981 14982 /// An object for which we can track unsequenced uses. 14983 using Object = const NamedDecl *; 14984 14985 /// Different flavors of object usage which we track. We only track the 14986 /// least-sequenced usage of each kind. 14987 enum UsageKind { 14988 /// A read of an object. Multiple unsequenced reads are OK. 14989 UK_Use, 14990 14991 /// A modification of an object which is sequenced before the value 14992 /// computation of the expression, such as ++n in C++. 14993 UK_ModAsValue, 14994 14995 /// A modification of an object which is not sequenced before the value 14996 /// computation of the expression, such as n++. 14997 UK_ModAsSideEffect, 14998 14999 UK_Count = UK_ModAsSideEffect + 1 15000 }; 15001 15002 /// Bundle together a sequencing region and the expression corresponding 15003 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo. 
  struct Usage {
    /// The expression which used/modified the object; null when no usage of
    /// this kind has been recorded yet.
    const Expr *UsageExpr;
    /// The sequencing region in which the usage occurred.
    SequenceTree::Seq Seq;

    Usage() : UsageExpr(nullptr) {}
  };

  struct UsageInfo {
    /// One least-sequenced usage per UsageKind.
    Usage Uses[UK_Count];

    /// Have we issued a diagnostic for this object already?
    bool Diagnosed;

    UsageInfo() : Diagnosed(false) {}
  };
  using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;

  Sema &SemaRef;

  /// Sequenced regions within the expression.
  SequenceTree Tree;

  /// Declaration modifications and references which we have seen.
  UsageInfoMap UsageMap;

  /// The region we are currently within.
  SequenceTree::Seq Region;

  /// Filled in with declarations which were modified as a side-effect
  /// (that is, post-increment operations).
  SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;

  /// Expressions to check later. We defer checking these to reduce
  /// stack usage.
  SmallVectorImpl<const Expr *> &WorkList;

  /// RAII object wrapping the visitation of a sequenced subexpression of an
  /// expression. At the end of this process, the side-effects of the evaluation
  /// become sequenced with respect to the value computation of the result, so
  /// we downgrade any UK_ModAsSideEffect within the evaluation to
  /// UK_ModAsValue.
  struct SequencedSubexpression {
    SequencedSubexpression(SequenceChecker &Self)
        : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
      // Redirect side-effect recording into our local buffer for the
      // duration of this sequenced subexpression.
      Self.ModAsSideEffect = &ModAsSideEffect;
    }

    ~SequencedSubexpression() {
      for (const std::pair<Object, Usage> &M : llvm::reverse(ModAsSideEffect)) {
        // Add a new usage with usage kind UK_ModAsValue, and then restore
        // the previous usage with UK_ModAsSideEffect (thus clearing it if
        // the previous one was empty).
        UsageInfo &UI = Self.UsageMap[M.first];
        auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
        Self.addUsage(M.first, UI, SideEffectUsage.UsageExpr, UK_ModAsValue);
        SideEffectUsage = M.second;
      }
      Self.ModAsSideEffect = OldModAsSideEffect;
    }

    SequenceChecker &Self;
    /// Side-effect modifications recorded while this object is active.
    SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
    /// Saved pointer so nested SequencedSubexpressions restore correctly.
    SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
  };

  /// RAII object wrapping the visitation of a subexpression which we might
  /// choose to evaluate as a constant. If any subexpression is evaluated and
  /// found to be non-constant, this allows us to suppress the evaluation of
  /// the outer expression.
  class EvaluationTracker {
  public:
    EvaluationTracker(SequenceChecker &Self)
        : Self(Self), Prev(Self.EvalTracker) {
      Self.EvalTracker = this;
    }

    ~EvaluationTracker() {
      Self.EvalTracker = Prev;
      // Propagate evaluation failure outward so enclosing trackers also
      // stop trying to constant-evaluate.
      if (Prev)
        Prev->EvalOK &= EvalOK;
    }

    /// Try to evaluate \p E as a boolean condition. Returns false (and leaves
    /// \p Result untouched) once any evaluation in this scope has failed or
    /// \p E is value-dependent.
    bool evaluate(const Expr *E, bool &Result) {
      if (!EvalOK || E->isValueDependent())
        return false;
      EvalOK = E->EvaluateAsBooleanCondition(
          Result, Self.SemaRef.Context, Self.SemaRef.isConstantEvaluated());
      return EvalOK;
    }

  private:
    SequenceChecker &Self;
    /// Enclosing tracker; the trackers form a stack threaded through Prev.
    EvaluationTracker *Prev;
    bool EvalOK = true;
  } *EvalTracker = nullptr; // Innermost active tracker, if any.

  /// Find the object which is produced by the specified expression,
  /// if any.
  Object getObject(const Expr *E, bool Mod) const {
    E = E->IgnoreParenCasts();
    if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
      // ++x / --x yield the object itself, so track through to the operand
      // when we are looking for a modified object.
      if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
        return getObject(UO->getSubExpr(), Mod);
    } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
      // (a, b) yields b; a = b modifies a.
      if (BO->getOpcode() == BO_Comma)
        return getObject(BO->getRHS(), Mod);
      if (Mod && BO->isAssignmentOp())
        return getObject(BO->getLHS(), Mod);
    } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
      // FIXME: Check for more interesting cases, like "x.n = ++x.n".
      if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts()))
        return ME->getMemberDecl();
    } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      // FIXME: If this is a reference, map through to its value.
      return DRE->getDecl();
    return nullptr;
  }

  /// Note that an object \p O was modified or used by an expression
  /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
  /// the object \p O as obtained via the \p UsageMap.
  void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
    // Get the old usage for the given object and usage kind.
    Usage &U = UI.Uses[UK];
    // Only overwrite the recorded usage if there was none, or if the old one
    // is sequenced with respect to the current region (we keep the
    // least-sequenced usage of each kind).
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq)) {
      // If we have a modification as side effect and are in a sequenced
      // subexpression, save the old Usage so that we can restore it later
      // in SequencedSubexpression::~SequencedSubexpression.
      if (UK == UK_ModAsSideEffect && ModAsSideEffect)
        ModAsSideEffect->push_back(std::make_pair(O, U));
      // Then record the new usage with the current sequencing region.
      U.UsageExpr = UsageExpr;
      U.Seq = Region;
    }
  }

  /// Check whether a modification or use of an object \p O in an expression
  /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
  /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
  /// \p IsModMod is true when we are checking for a mod-mod unsequenced
  /// usage and false we are checking for a mod-use unsequenced usage.
  void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
                  UsageKind OtherKind, bool IsModMod) {
    if (UI.Diagnosed)
      return;

    const Usage &U = UI.Uses[OtherKind];
    if (!U.UsageExpr || !Tree.isUnsequenced(Region, U.Seq))
      return;

    // Arrange for the diagnostic to name the modification first: when the
    // prior usage was a use, the current expression is the modification.
    const Expr *Mod = U.UsageExpr;
    const Expr *ModOrUse = UsageExpr;
    if (OtherKind == UK_Use)
      std::swap(Mod, ModOrUse);

    SemaRef.DiagRuntimeBehavior(
        Mod->getExprLoc(), {Mod, ModOrUse},
        SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
                               : diag::warn_unsequenced_mod_use)
            << O << SourceRange(ModOrUse->getExprLoc()));
    // One diagnostic per object is enough; suppress the rest.
    UI.Diagnosed = true;
  }

  // A note on note{Pre, Post}{Use, Mod}:
  //
  // (It helps to follow the algorithm with an expression such as
  //  "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
  //  operations before C++17 and both are well-defined in C++17).
  //
  // When visiting a node which uses/modify an object we first call notePreUse
  // or notePreMod before visiting its sub-expression(s). At this point the
  // children of the current node have not yet been visited and so the eventual
  // uses/modifications resulting from the children of the current node have not
  // been recorded yet.
  //
  // We then visit the children of the current node. After that notePostUse or
  // notePostMod is called.
  // These will 1) detect an unsequenced modification
  // as side effect (as in "k++ + k") and 2) add a new usage with the
  // appropriate usage kind.
  //
  // We also have to be careful that some operation sequences modification as
  // side effect as well (for example: || or ,). To account for this we wrap
  // the visitation of such a sub-expression (for example: the LHS of || or ,)
  // with SequencedSubexpression. SequencedSubexpression is an RAII object
  // which record usages which are modifications as side effect, and then
  // downgrade them (or more accurately restore the previous usage which was a
  // modification as side effect) when exiting the scope of the sequenced
  // subexpression.

  /// Check a use of \p O against prior modifications, before visiting the
  /// using expression's children.
  void notePreUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    // Uses conflict with other modifications.
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
  }

  /// After visiting the children: check against side-effect modifications,
  /// then record the use.
  void notePostUse(Object O, const Expr *UseExpr) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/false);
    addUsage(O, UI, UseExpr, /*UsageKind=*/UK_Use);
  }

  /// Check a modification of \p O against prior modifications and uses,
  /// before visiting the modifying expression's children.
  void notePreMod(Object O, const Expr *ModExpr) {
    UsageInfo &UI = UsageMap[O];
    // Modifications conflict with other modifications and with uses.
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
  }

  /// After visiting the children: check against side-effect modifications,
  /// then record this modification with kind \p UK.
  void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
    UsageInfo &UI = UsageMap[O];
    checkUsage(O, UI, ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
               /*IsModMod=*/true);
    addUsage(O, UI, ModExpr, /*UsageKind=*/UK);
  }

public:
  /// Construct the checker and immediately analyze \p E; diagnostics are
  /// emitted during construction.
  SequenceChecker(Sema &S, const Expr *E,
                  SmallVectorImpl<const Expr *> &WorkList)
      : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
    Visit(E);
    // Silence a -Wunused-private-field since WorkList is now unused.
    // TODO: Evaluate if it can be used, and if not remove it.
    (void)this->WorkList;
  }

  void VisitStmt(const Stmt *S) {
    // Skip all statements which aren't expressions for now.
  }

  void VisitExpr(const Expr *E) {
    // By default, just recurse to evaluated subexpressions.
    Base::VisitStmt(E);
  }

  /// An lvalue-to-rvalue conversion is a read of the underlying object.
  void VisitCastExpr(const CastExpr *E) {
    Object O = Object();
    if (E->getCastKind() == CK_LValueToRValue)
      O = getObject(E->getSubExpr(), false);

    if (O)
      notePreUse(O, E);
    VisitExpr(E);
    if (O)
      notePostUse(O, E);
  }

  /// Visit two expressions such that \p SequencedBefore is fully sequenced
  /// (including its side effects) before \p SequencedAfter.
  void VisitSequencedExpressions(const Expr *SequencedBefore,
                                 const Expr *SequencedAfter) {
    SequenceTree::Seq BeforeRegion = Tree.allocate(Region);
    SequenceTree::Seq AfterRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    {
      SequencedSubexpression SeqBefore(*this);
      Region = BeforeRegion;
      Visit(SequencedBefore);
    }

    Region = AfterRegion;
    Visit(SequencedAfter);

    Region = OldRegion;

    Tree.merge(BeforeRegion);
    Tree.merge(AfterRegion);
  }

  void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
    // C++17 [expr.sub]p1:
    //   The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
    //   expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(ASE->getLHS(), ASE->getRHS());
    else {
      Visit(ASE->getLHS());
      Visit(ASE->getRHS());
    }
  }

  void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
  void VisitBinPtrMem(const BinaryOperator *BO) {
    // C++17 [expr.mptr.oper]p4:
    //  Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
    //  the expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
  void VisitBinShlShr(const BinaryOperator *BO) {
    // C++17 [expr.shift]p4:
    //  The expression E1 is sequenced before the expression E2.
    if (SemaRef.getLangOpts().CPlusPlus17)
      VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
    else {
      Visit(BO->getLHS());
      Visit(BO->getRHS());
    }
  }

  void VisitBinComma(const BinaryOperator *BO) {
    // C++11 [expr.comma]p1:
    //   Every value computation and side effect associated with the left
    //   expression is sequenced before every value computation and side
    //   effect associated with the right expression.
    VisitSequencedExpressions(BO->getLHS(), BO->getRHS());
  }

  void VisitBinAssign(const BinaryOperator *BO) {
    // In C++17 the two operands get their own (sequenced) regions; before
    // C++17 both share the current region (unsequenced with each other).
    SequenceTree::Seq RHSRegion;
    SequenceTree::Seq LHSRegion;
    if (SemaRef.getLangOpts().CPlusPlus17) {
      RHSRegion = Tree.allocate(Region);
      LHSRegion = Tree.allocate(Region);
    } else {
      RHSRegion = Region;
      LHSRegion = Region;
    }
    SequenceTree::Seq OldRegion = Region;

    // C++11 [expr.ass]p1:
    //  [...] the assignment is sequenced after the value computation
    //  of the right and left operands, [...]
    //
    // so check it before inspecting the operands and update the
    // map afterwards.
    Object O = getObject(BO->getLHS(), /*Mod=*/true);
    if (O)
      notePreMod(O, BO);

    if (SemaRef.getLangOpts().CPlusPlus17) {
      // C++17 [expr.ass]p1:
      //  [...] The right operand is sequenced before the left operand. [...]
      {
        SequencedSubexpression SeqBefore(*this);
        Region = RHSRegion;
        Visit(BO->getRHS());
      }

      Region = LHSRegion;
      Visit(BO->getLHS());

      // A compound assignment also reads the LHS object.
      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

    } else {
      // C++11 does not specify any sequencing between the LHS and RHS.
      Region = LHSRegion;
      Visit(BO->getLHS());

      if (O && isa<CompoundAssignOperator>(BO))
        notePostUse(O, BO);

      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    // C++11 [expr.ass]p1:
    //  the assignment is sequenced [...] before the value computation of the
    //  assignment expression.
    // C11 6.5.16/3 has no such rule.
    Region = OldRegion;
    if (O)
      notePostMod(O, BO,
                  SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                  : UK_ModAsSideEffect);
    if (SemaRef.getLangOpts().CPlusPlus17) {
      Tree.merge(RHSRegion);
      Tree.merge(LHSRegion);
    }
  }

  /// Compound assignments share all of the sequencing logic of plain
  /// assignment (plus the extra LHS read handled above).
  void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
    VisitBinAssign(CAO);
  }

  void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
  void VisitUnaryPreIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // C++11 [expr.pre.incr]p1:
    //   the expression ++x is equivalent to x+=1
    notePostMod(O, UO,
                SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
                                                : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(const UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    // Post-increment/decrement modify the object as a side effect only.
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  void VisitBinLOr(const BinaryOperator *BO) {
    // C++11 [expr.log.or]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.or]p1:
    //  [...] the second operand is not evaluated if the first operand
    //  evaluates to true.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    // Visit the RHS unless we proved the LHS is true (short-circuit).
    bool ShouldVisitRHS = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitBinLAnd(const BinaryOperator *BO) {
    // C++11 [expr.log.and]p2:
    //  If the second expression is evaluated, every value computation and
    //  side effect associated with the first expression is sequenced before
    //  every value computation and side effect associated with the
    //  second expression.
    SequenceTree::Seq LHSRegion = Tree.allocate(Region);
    SequenceTree::Seq RHSRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = LHSRegion;
      Visit(BO->getLHS());
    }

    // C++11 [expr.log.and]p1:
    //  [...] the second operand is not evaluated if the first operand is false.
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(BO->getLHS(), EvalResult);
    // Visit the RHS unless we proved the LHS is false (short-circuit).
    bool ShouldVisitRHS = !EvalOK || (EvalOK && EvalResult);
    if (ShouldVisitRHS) {
      Region = RHSRegion;
      Visit(BO->getRHS());
    }

    Region = OldRegion;
    Tree.merge(LHSRegion);
    Tree.merge(RHSRegion);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
    // C++11 [expr.cond]p1:
    //  [...] Every value computation and side effect associated with the first
    //  expression is sequenced before every value computation and side effect
    //  associated with the second or third expression.
    SequenceTree::Seq ConditionRegion = Tree.allocate(Region);

    // No sequencing is specified between the true and false expression.
    // However since exactly one of both is going to be evaluated we can
    // consider them to be sequenced. This is needed to avoid warning on
    // something like "x ? y+= 1 : y += 2;" in the case where we will visit
    // both the true and false expressions because we can't evaluate x.
    // This will still allow us to detect an expression like (pre C++17)
    // "(x ? y += 1 : y += 2) = y".
    //
    // We don't wrap the visitation of the true and false expression with
    // SequencedSubexpression because we don't want to downgrade modifications
    // as side effect in the true and false expressions after the visition
    // is done. (for example in the expression "(x ? y++ : y++) + y" we should
    // not warn between the two "y++", but we should warn between the "y++"
    // and the "y".
    SequenceTree::Seq TrueRegion = Tree.allocate(Region);
    SequenceTree::Seq FalseRegion = Tree.allocate(Region);
    SequenceTree::Seq OldRegion = Region;

    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Region = ConditionRegion;
      Visit(CO->getCond());
    }

    // C++11 [expr.cond]p1:
    //  [...] The first expression is contextually converted to bool (Clause 4).
    //  It is evaluated and if it is true, the result of the conditional
    //  expression is the value of the second expression, otherwise that of the
    //  third expression. Only one of the second and third expressions is
    //  evaluated. [...]
    bool EvalResult = false;
    bool EvalOK = Eval.evaluate(CO->getCond(), EvalResult);
    // If the condition cannot be evaluated, conservatively visit both arms.
    bool ShouldVisitTrueExpr = !EvalOK || (EvalOK && EvalResult);
    bool ShouldVisitFalseExpr = !EvalOK || (EvalOK && !EvalResult);
    if (ShouldVisitTrueExpr) {
      Region = TrueRegion;
      Visit(CO->getTrueExpr());
    }
    if (ShouldVisitFalseExpr) {
      Region = FalseRegion;
      Visit(CO->getFalseExpr());
    }

    Region = OldRegion;
    Tree.merge(ConditionRegion);
    Tree.merge(TrueRegion);
    Tree.merge(FalseRegion);
  }

  void VisitCallExpr(const CallExpr *CE) {
    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
    // NOTE(review): Context here appears to come from the visitor base class
    // (Base(S.Context) in the constructor) — confirm against
    // ConstEvaluatedExprVisitor.
    if (CE->isUnevaluatedBuiltinCall(Context))
      return;

    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    SemaRef.runWithSufficientStackSpace(CE->getExprLoc(), [&] {
      // C++17 [expr.call]p5
      //   The postfix-expression is sequenced before each expression in the
      //   expression-list and any default argument. [...]
      SequenceTree::Seq CalleeRegion;
      SequenceTree::Seq OtherRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        CalleeRegion = Tree.allocate(Region);
        OtherRegion = Tree.allocate(Region);
      } else {
        CalleeRegion = Region;
        OtherRegion = Region;
      }
      SequenceTree::Seq OldRegion = Region;

      // Visit the callee expression first.
      Region = CalleeRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        SequencedSubexpression Sequenced(*this);
        Visit(CE->getCallee());
      } else {
        Visit(CE->getCallee());
      }

      // Then visit the argument expressions.
      Region = OtherRegion;
      for (const Expr *Argument : CE->arguments())
        Visit(Argument);

      Region = OldRegion;
      if (SemaRef.getLangOpts().CPlusPlus17) {
        Tree.merge(CalleeRegion);
        Tree.merge(OtherRegion);
      }
    });
  }

  void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
    // C++17 [over.match.oper]p2:
    //   [...] the operator notation is first transformed to the equivalent
    //   function-call notation as summarized in Table 12 (where @ denotes one
    //   of the operators covered in the specified subclause). However, the
    //   operands are sequenced in the order prescribed for the built-in
    //   operator (Clause 8).
    //
    // From the above only overloaded binary operators and overloaded call
    // operators have sequencing rules in C++17 that we need to handle
    // separately.
    if (!SemaRef.getLangOpts().CPlusPlus17 ||
        (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
      return VisitCallExpr(CXXOCE);

    enum {
      NoSequencing,
      LHSBeforeRHS,
      RHSBeforeLHS,
      LHSBeforeRest
    } SequencingKind;
    switch (CXXOCE->getOperator()) {
    // Assignment operators: RHS sequenced before LHS (C++17 [expr.ass]p1).
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      SequencingKind = RHSBeforeLHS;
      break;

    // Shifts, logical operators, comma, ->*, []: LHS sequenced before RHS.
    case OO_LessLess:
    case OO_GreaterGreater:
    case OO_AmpAmp:
    case OO_PipePipe:
    case OO_Comma:
    case OO_ArrowStar:
    case OO_Subscript:
      SequencingKind = LHSBeforeRHS;
      break;

    case OO_Call:
      SequencingKind = LHSBeforeRest;
      break;

    default:
      SequencingKind = NoSequencing;
      break;
    }

    if (SequencingKind == NoSequencing)
      return VisitCallExpr(CXXOCE);

    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
      assert(SemaRef.getLangOpts().CPlusPlus17 &&
             "Should only get there with C++17 and above!");
      assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
             "Should only get there with an overloaded binary operator"
             " or an overloaded call operator!");

      if (SequencingKind == LHSBeforeRest) {
        assert(CXXOCE->getOperator() == OO_Call &&
               "We should only have an overloaded call operator here!");

        // This is very similar to VisitCallExpr, except that we only have the
        // C++17 case. The postfix-expression is the first argument of the
        // CXXOperatorCallExpr. The expressions in the expression-list, if any,
        // are in the following arguments.
        //
        // Note that we intentionally do not visit the callee expression since
        // it is just a decayed reference to a function.
        SequenceTree::Seq PostfixExprRegion = Tree.allocate(Region);
        SequenceTree::Seq ArgsRegion = Tree.allocate(Region);
        SequenceTree::Seq OldRegion = Region;

        assert(CXXOCE->getNumArgs() >= 1 &&
               "An overloaded call operator must have at least one argument"
               " for the postfix-expression!");
        const Expr *PostfixExpr = CXXOCE->getArgs()[0];
        llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
                                          CXXOCE->getNumArgs() - 1);

        // Visit the postfix-expression first.
        {
          Region = PostfixExprRegion;
          SequencedSubexpression Sequenced(*this);
          Visit(PostfixExpr);
        }

        // Then visit the argument expressions.
        Region = ArgsRegion;
        for (const Expr *Arg : Args)
          Visit(Arg);

        Region = OldRegion;
        Tree.merge(PostfixExprRegion);
        Tree.merge(ArgsRegion);
      } else {
        assert(CXXOCE->getNumArgs() == 2 &&
               "Should only have two arguments here!");
        assert((SequencingKind == LHSBeforeRHS ||
                SequencingKind == RHSBeforeLHS) &&
               "Unexpected sequencing kind!");

        // We do not visit the callee expression since it is just a decayed
        // reference to a function.
        const Expr *E1 = CXXOCE->getArg(0);
        const Expr *E2 = CXXOCE->getArg(1);
        if (SequencingKind == RHSBeforeLHS)
          std::swap(E1, E2);

        return VisitSequencedExpressions(E1, E2);
      }
    });
  }

  void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    // Give each initializer its own region so initializers are sequenced
    // with respect to one another.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
                                              E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(const InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
    // Give each initializer its own region, as in VisitCXXConstructExpr.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
      const Expr *E = ILE->getInit(I);
      if (!E)
        continue;
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(E);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }
};

} // namespace

/// Diagnose unsequenced modifications/uses within \p E and any expressions
/// a SequenceChecker run defers onto the worklist. Each worklist item is
/// checked by constructing a (temporary) SequenceChecker, which emits its
/// diagnostics during construction.
void Sema::CheckUnsequencedOperations(const Expr *E) {
  SmallVector<const Expr *, 8> WorkList;
  WorkList.push_back(E);
  while (!WorkList.empty()) {
    const Expr *Item = WorkList.pop_back_val();
    SequenceChecker(*this, Item, WorkList);
  }
}

void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
                              bool IsConstexpr) {
  // Treat the expression as constant-evaluated for the duration of these
  // checks when requested or when it is a ConstantExpr.
  llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
                                       IsConstexpr || isa<ConstantExpr>(E));
  CheckImplicitConversions(E, CheckLoc);
  if (!E->isInstantiationDependent())
    CheckUnsequencedOperations(E);
  if (!IsConstexpr && !E->isValueDependent())
    CheckForIntOverflow(E);
  DiagnoseMisalignedMembers();
}

void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
                                       FieldDecl *BitField,
                                       Expr *Init) {
  // Only run the assignment analysis for its diagnostics; the result is
  // deliberately discarded.
  (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
}

/// Recurse through pointer/reference/paren sugar and array element types of a
/// variably-modified parameter type, diagnosing any use of the [*] size
/// modifier (which is invalid in a function definition).
static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
                                         SourceLocation Loc) {
  if (!PType->isVariablyModifiedType())
    return;
  if (const auto *PointerTy = dyn_cast<PointerType>(PType)) {
    diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) {
    diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc);
    return;
  }
  if (const auto *ParenTy = dyn_cast<ParenType>(PType)) {
    diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc);
    return;
  }

  const ArrayType *AT = S.Context.getAsArrayType(PType);
  if (!AT)
    return;

  if (AT->getSizeModifier() != ArrayType::Star) {
    diagnoseArrayStarInParamType(S, AT->getElementType(), Loc);
    return;
  }

  S.Diag(Loc, diag::err_array_star_in_function_definition);
}

/// CheckParmsForFunctionDef - Check that the parameters of the given
/// function are appropriate for the definition of a function. This
/// takes care of any checks that cannot be performed on the
/// declaration itself, e.g., that the types of each of the function
/// parameters are complete.
bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                                    bool CheckParameterNames) {
  bool HasInvalidParm = false;
  for (ParmVarDecl *Param : Parameters) {
    // C99 6.7.5.3p4: the parameters in a parameter type list in a
    // function declarator that is part of a function definition of
    // that function shall not have incomplete type.
    //
    // This is also C++ [dcl.fct]p6.
    if (!Param->isInvalidDecl() &&
        RequireCompleteType(Param->getLocation(), Param->getType(),
                            diag::err_typecheck_decl_incomplete_type)) {
      Param->setInvalidDecl();
      HasInvalidParm = true;
    }

    // C99 6.9.1p5: If the declarator includes a parameter type list, the
    // declaration of each parameter shall include an identifier.
    if (CheckParameterNames && Param->getIdentifier() == nullptr &&
        !Param->isImplicit() && !getLangOpts().CPlusPlus) {
      // Diagnose this as an extension in C17 and earlier.
      if (!getLangOpts().C2x)
        Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c2x);
    }

    // C99 6.7.5.3p12:
    //  If the function declarator is not part of a definition of that
    //  function, parameters may have incomplete type and may use the [*]
    //  notation in their sequences of declarator specifiers to specify
    //  variable length array types.
    QualType PType = Param->getOriginalType();
    // FIXME: This diagnostic should point the '[*]' if source-location
    // information is added for it.
    diagnoseArrayStarInParamType(*this, PType, Param->getLocation());

    // If the parameter is a c++ class type and it has to be destructed in the
    // callee function, declare the destructor so that it can be called by the
    // callee function. Do not perform any direct access check on the dtor here.
    if (!Param->isInvalidDecl()) {
      if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
        if (!ClassDecl->isInvalidDecl() &&
            !ClassDecl->hasIrrelevantDestructor() &&
            !ClassDecl->isDependentContext() &&
            ClassDecl->isParamDestroyedInCallee()) {
          CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
          MarkFunctionReferenced(Param->getLocation(), Destructor);
          DiagnoseUseOfDecl(Destructor, Param->getLocation());
        }
      }
    }

    // Parameters with the pass_object_size attribute only need to be marked
    // constant at function definitions. Because we lack information about
    // whether we're on a declaration or definition when we're instantiating the
    // attribute, we need to check for constness here.
    if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
      if (!Param->getType().isConstQualified())
        Diag(Param->getLocation(), diag::err_attribute_pointers_only)
            << Attr->getSpelling() << 1;

    // Check for parameter names shadowing fields from the class.
    if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
      // The owning context for the parameter should be the function, but we
      // want to see if this function's declaration context is a record.
      DeclContext *DC = Param->getDeclContext();
      if (DC && DC->isFunctionOrMethod()) {
        if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
          CheckShadowInheritedFields(Param->getLocation(), Param->getDeclName(),
                                     RD, /*DeclIsField*/ false);
      }
    }
  }

  return HasInvalidParm;
}

// Forward declaration: computes the alignment of a base object and a constant
// offset from it for a pointer-typed expression (defined later in this file).
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx);

/// Compute the alignment and offset of the base class object given the
/// derived-to-base cast expression and the alignment and offset of the derived
/// class object.
static std::pair<CharUnits, CharUnits>
getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
                                   CharUnits BaseAlignment, CharUnits Offset,
                                   ASTContext &Ctx) {
  // Walk the cast path one base specifier at a time, updating the running
  // alignment/offset for each step.
  for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
       ++PathI) {
    const CXXBaseSpecifier *Base = *PathI;
    const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
    if (Base->isVirtual()) {
      // The complete object may have a lower alignment than the non-virtual
      // alignment of the base, in which case the base may be misaligned. Choose
      // the smaller of the non-virtual alignment and BaseAlignment, which is a
      // conservative lower bound of the complete object alignment.
15919 CharUnits NonVirtualAlignment = 15920 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment(); 15921 BaseAlignment = std::min(BaseAlignment, NonVirtualAlignment); 15922 Offset = CharUnits::Zero(); 15923 } else { 15924 const ASTRecordLayout &RL = 15925 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl()); 15926 Offset += RL.getBaseClassOffset(BaseDecl); 15927 } 15928 DerivedType = Base->getType(); 15929 } 15930 15931 return std::make_pair(BaseAlignment, Offset); 15932} 15933 15934/// Compute the alignment and offset of a binary additive operator. 15935static std::optional<std::pair<CharUnits, CharUnits>> 15936getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE, 15937 bool IsSub, ASTContext &Ctx) { 15938 QualType PointeeType = PtrE->getType()->getPointeeType(); 15939 15940 if (!PointeeType->isConstantSizeType()) 15941 return std::nullopt; 15942 15943 auto P = getBaseAlignmentAndOffsetFromPtr(PtrE, Ctx); 15944 15945 if (!P) 15946 return std::nullopt; 15947 15948 CharUnits EltSize = Ctx.getTypeSizeInChars(PointeeType); 15949 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) { 15950 CharUnits Offset = EltSize * IdxRes->getExtValue(); 15951 if (IsSub) 15952 Offset = -Offset; 15953 return std::make_pair(P->first, P->second + Offset); 15954 } 15955 15956 // If the integer expression isn't a constant expression, compute the lower 15957 // bound of the alignment using the alignment and offset of the pointer 15958 // expression and the element size. 15959 return std::make_pair( 15960 P->first.alignmentAtOffset(P->second).alignmentAtOffset(EltSize), 15961 CharUnits::Zero()); 15962} 15963 15964/// This helper function takes an lvalue expression and returns the alignment of 15965/// a VarDecl and a constant offset from the VarDecl. 
15966std::optional<std::pair< 15967 CharUnits, 15968 CharUnits>> static getBaseAlignmentAndOffsetFromLValue(const Expr *E, 15969 ASTContext &Ctx) { 15970 E = E->IgnoreParens(); 15971 switch (E->getStmtClass()) { 15972 default: 15973 break; 15974 case Stmt::CStyleCastExprClass: 15975 case Stmt::CXXStaticCastExprClass: 15976 case Stmt::ImplicitCastExprClass: { 15977 auto *CE = cast<CastExpr>(E); 15978 const Expr *From = CE->getSubExpr(); 15979 switch (CE->getCastKind()) { 15980 default: 15981 break; 15982 case CK_NoOp: 15983 return getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15984 case CK_UncheckedDerivedToBase: 15985 case CK_DerivedToBase: { 15986 auto P = getBaseAlignmentAndOffsetFromLValue(From, Ctx); 15987 if (!P) 15988 break; 15989 return getDerivedToBaseAlignmentAndOffset(CE, From->getType(), P->first, 15990 P->second, Ctx); 15991 } 15992 } 15993 break; 15994 } 15995 case Stmt::ArraySubscriptExprClass: { 15996 auto *ASE = cast<ArraySubscriptExpr>(E); 15997 return getAlignmentAndOffsetFromBinAddOrSub(ASE->getBase(), ASE->getIdx(), 15998 false, Ctx); 15999 } 16000 case Stmt::DeclRefExprClass: { 16001 if (auto *VD = dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) { 16002 // FIXME: If VD is captured by copy or is an escaping __block variable, 16003 // use the alignment of VD's type. 
16004 if (!VD->getType()->isReferenceType()) 16005 return std::make_pair(Ctx.getDeclAlign(VD), CharUnits::Zero()); 16006 if (VD->hasInit()) 16007 return getBaseAlignmentAndOffsetFromLValue(VD->getInit(), Ctx); 16008 } 16009 break; 16010 } 16011 case Stmt::MemberExprClass: { 16012 auto *ME = cast<MemberExpr>(E); 16013 auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); 16014 if (!FD || FD->getType()->isReferenceType() || 16015 FD->getParent()->isInvalidDecl()) 16016 break; 16017 std::optional<std::pair<CharUnits, CharUnits>> P; 16018 if (ME->isArrow()) 16019 P = getBaseAlignmentAndOffsetFromPtr(ME->getBase(), Ctx); 16020 else 16021 P = getBaseAlignmentAndOffsetFromLValue(ME->getBase(), Ctx); 16022 if (!P) 16023 break; 16024 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(FD->getParent()); 16025 uint64_t Offset = Layout.getFieldOffset(FD->getFieldIndex()); 16026 return std::make_pair(P->first, 16027 P->second + CharUnits::fromQuantity(Offset)); 16028 } 16029 case Stmt::UnaryOperatorClass: { 16030 auto *UO = cast<UnaryOperator>(E); 16031 switch (UO->getOpcode()) { 16032 default: 16033 break; 16034 case UO_Deref: 16035 return getBaseAlignmentAndOffsetFromPtr(UO->getSubExpr(), Ctx); 16036 } 16037 break; 16038 } 16039 case Stmt::BinaryOperatorClass: { 16040 auto *BO = cast<BinaryOperator>(E); 16041 auto Opcode = BO->getOpcode(); 16042 switch (Opcode) { 16043 default: 16044 break; 16045 case BO_Comma: 16046 return getBaseAlignmentAndOffsetFromLValue(BO->getRHS(), Ctx); 16047 } 16048 break; 16049 } 16050 } 16051 return std::nullopt; 16052} 16053 16054/// This helper function takes a pointer expression and returns the alignment of 16055/// a VarDecl and a constant offset from the VarDecl. 
std::optional<std::pair<
    CharUnits, CharUnits>> static getBaseAlignmentAndOffsetFromPtr(const Expr
                                                                       *E,
                                                                   ASTContext
                                                                       &Ctx) {
  E = E->IgnoreParens();
  switch (E->getStmtClass()) {
  default:
    break;
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::ImplicitCastExprClass: {
    auto *CE = cast<CastExpr>(E);
    const Expr *From = CE->getSubExpr();
    switch (CE->getCastKind()) {
    default:
      break;
    case CK_NoOp:
      return getBaseAlignmentAndOffsetFromPtr(From, Ctx);
    case CK_ArrayToPointerDecay:
      // The decayed pointer points at the array lvalue itself.
      return getBaseAlignmentAndOffsetFromLValue(From, Ctx);
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      auto P = getBaseAlignmentAndOffsetFromPtr(From, Ctx);
      if (!P)
        break;
      return getDerivedToBaseAlignmentAndOffset(
          CE, From->getType()->getPointeeType(), P->first, P->second, Ctx);
    }
    }
    break;
  }
  case Stmt::CXXThisExprClass: {
    // 'this' is only guaranteed the non-virtual alignment of the class.
    auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
    CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
    return std::make_pair(Alignment, CharUnits::Zero());
  }
  case Stmt::UnaryOperatorClass: {
    auto *UO = cast<UnaryOperator>(E);
    // &lv: the pointer's target is the lvalue operand.
    if (UO->getOpcode() == UO_AddrOf)
      return getBaseAlignmentAndOffsetFromLValue(UO->getSubExpr(), Ctx);
    break;
  }
  case Stmt::BinaryOperatorClass: {
    auto *BO = cast<BinaryOperator>(E);
    auto Opcode = BO->getOpcode();
    switch (Opcode) {
    default:
      break;
    case BO_Add:
    case BO_Sub: {
      const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
      // For addition the pointer may appear on either side; canonicalize so
      // that LHS is the pointer operand.
      if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
        std::swap(LHS, RHS);
      return getAlignmentAndOffsetFromBinAddOrSub(LHS, RHS, Opcode == BO_Sub,
                                                  Ctx);
    }
    case BO_Comma:
      return getBaseAlignmentAndOffsetFromPtr(BO->getRHS(), Ctx);
    }
    break;
  }
  }
  return std::nullopt;
}

/// Return the best lower bound we can compute for the alignment of the
/// pointer value E: either the alignment derived from the underlying VarDecl
/// plus constant offset, or, failing that, the pointee type's alignment.
static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
  // See if we can compute the alignment of a VarDecl and an offset from it.
  std::optional<std::pair<CharUnits, CharUnits>> P =
      getBaseAlignmentAndOffsetFromPtr(E, S.Context);

  if (P)
    return P->first.alignmentAtOffset(P->second);

  // If that failed, return the type's alignment.
  return S.Context.getTypeAlignInChars(E->getType()->getPointeeType());
}

/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
  // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast_align (as is the default).
  if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
    return;

  // Ignore dependent types.
  if (T->isDependentType() || Op->getType()->isDependentType())
    return;

  // Require that the destination be a pointer type.
  const PointerType *DestPtr = T->getAs<PointerType>();
  if (!DestPtr) return;

  // If the destination has alignment 1, we're done.
  QualType DestPointee = DestPtr->getPointeeType();
  if (DestPointee->isIncompleteType()) return;
  CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
  if (DestAlign.isOne()) return;

  // Require that the source be a pointer type.
  const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
  if (!SrcPtr) return;
  QualType SrcPointee = SrcPtr->getPointeeType();

  // Explicitly allow casts from cv void*. We already implicitly
  // allowed casts to cv void*, since they have alignment 1.
  // Also allow casts involving incomplete types, which implicitly
  // includes 'void'.
  if (SrcPointee->isIncompleteType()) return;

  CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this);

  // Only warn when the cast strengthens the alignment requirement.
  if (SrcAlign >= DestAlign) return;

  Diag(TRange.getBegin(), diag::warn_cast_align)
    << Op->getType() << T
    << static_cast<unsigned>(SrcAlign.getQuantity())
    << static_cast<unsigned>(DestAlign.getQuantity())
    << TRange << Op->getSourceRange();
}

/// Diagnose a constant array index that is out of bounds.
///
/// \param BaseExpr the array or pointer being indexed.
/// \param IndexExpr the index expression; only constant indices are checked.
/// \param ASE the originating subscript expression, or null for pointer
///        arithmetic (selects which diagnostic family is emitted).
/// \param AllowOnePastEnd permit index == size (the one-past-the-end address).
/// \param IndexNegated the caller syntactically negated the index, so negate
///        it here before checking.
void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                            const ArraySubscriptExpr *ASE,
                            bool AllowOnePastEnd, bool IndexNegated) {
  // Already diagnosed by the constant evaluator.
  if (isConstantEvaluated())
    return;

  IndexExpr = IndexExpr->IgnoreParenImpCasts();
  if (IndexExpr->isValueDependent())
    return;

  const Type *EffectiveType =
      BaseExpr->getType()->getPointeeOrArrayElementType();
  BaseExpr = BaseExpr->IgnoreParenCasts();
  const ConstantArrayType *ArrayTy =
      Context.getAsConstantArrayType(BaseExpr->getType());

  LangOptions::StrictFlexArraysLevelKind
    StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();

  const Type *BaseType =
      ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
  // Flexible-array-member-like accesses have no known bound either; treat
  // them like raw pointer arithmetic below.
  bool IsUnboundedArray =
      BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
                                 Context, StrictFlexArraysLevel,
                                 /*IgnoreTemplateOrMacroSubstitution=*/true);
  if (EffectiveType->isDependentType() ||
      (!IsUnboundedArray && BaseType->isDependentType()))
    return;

  Expr::EvalResult Result;
  if (!IndexExpr->EvaluateAsInt(Result, Context, Expr::SE_AllowSideEffects))
    return;

  llvm::APSInt index = Result.Val.getInt();
  if (IndexNegated) {
    index.setIsUnsigned(false);
    index = -index;
  }

  if (IsUnboundedArray) {
    if (EffectiveType->isFunctionType())
      return;
    if (index.isUnsigned() || !index.isNegative()) {
      const auto &ASTC = getASTContext();
      unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
          EffectiveType->getCanonicalTypeInternal().getAddressSpace());
      if (index.getBitWidth() < AddrBits)
        index = index.zext(AddrBits);
      std::optional<CharUnits> ElemCharUnits =
          ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
      // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
      // pointer) bounds-checking isn't meaningful.
      if (!ElemCharUnits)
        return;
      llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
      // If index has more active bits than address space, we already know
      // we have a bounds violation to warn about. Otherwise, compute
      // address of (index + 1)th element, and warn about bounds violation
      // only if that address exceeds address space.
      if (index.getActiveBits() <= AddrBits) {
        bool Overflow;
        llvm::APInt Product(index);
        Product += 1;
        Product = Product.umul_ov(ElemBytes, Overflow);
        if (!Overflow && Product.getActiveBits() <= AddrBits)
          return;
      }

      // Need to compute max possible elements in address space, since that
      // is included in diag message.
      llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
      // Widen first so the +1 below (giving 2^AddrBits bytes) cannot wrap.
      MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
      MaxElems += 1;
      ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
      MaxElems = MaxElems.udiv(ElemBytes);

      unsigned DiagID =
          ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
              : diag::warn_ptr_arith_exceeds_max_addressable_bounds;

      // Diag message shows element size in bits and in "bytes" (platform-
      // dependent CharUnits)
      DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                          PDiag(DiagID)
                              << toString(index, 10, true) << AddrBits
                              << (unsigned)ASTC.toBits(*ElemCharUnits)
                              << toString(ElemBytes, 10, false)
                              << toString(MaxElems, 10, false)
                              << (unsigned)MaxElems.getLimitedValue(~0U)
                              << IndexExpr->getSourceRange());

      const NamedDecl *ND = nullptr;
      // Try harder to find a NamedDecl to point at in the note.
      while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
        BaseExpr = ASE->getBase()->IgnoreParenCasts();
      if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
        ND = DRE->getDecl();
      if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
        ND = ME->getMemberDecl();

      if (ND)
        DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                            PDiag(diag::note_array_declared_here) << ND);
    }
    return;
  }

  if (index.isUnsigned() || !index.isNegative()) {
    // It is possible that the type of the base expression after
    // IgnoreParenCasts is incomplete, even though the type of the base
    // expression before IgnoreParenCasts is complete (see PR39746 for an
    // example). In this case we have no information about whether the array
    // access exceeds the array bounds. However we can still diagnose an array
    // access which precedes the array bounds.
    if (BaseType->isIncompleteType())
      return;

    llvm::APInt size = ArrayTy->getSize();

    if (BaseType != EffectiveType) {
      // Make sure we're comparing apples to apples when comparing index to
      // size.
      uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
      uint64_t array_typesize = Context.getTypeSize(BaseType);

      // Handle ptrarith_typesize being zero, such as when casting to void*.
      // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
      if (!ptrarith_typesize)
        ptrarith_typesize = Context.getCharWidth();

      if (ptrarith_typesize != array_typesize) {
        // There's a cast to a different size type involved.
        uint64_t ratio = array_typesize / ptrarith_typesize;

        // TODO: Be smarter about handling cases where array_typesize is not a
        // multiple of ptrarith_typesize.
        if (ptrarith_typesize * ratio == array_typesize)
          size *= llvm::APInt(size.getBitWidth(), ratio);
      }
    }

    // Bring index and size to a common bit width before comparing.
    if (size.getBitWidth() > index.getBitWidth())
      index = index.zext(size.getBitWidth());
    else if (size.getBitWidth() < index.getBitWidth())
      size = size.zext(index.getBitWidth());

    // For array subscripting the index must be less than size, but for pointer
    // arithmetic also allow the index (offset) to be equal to size since
    // computing the next address after the end of the array is legal and
    // commonly done e.g. in C++ iterators and range-based for loops.
    if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
      return;

    // Suppress the warning if the subscript expression (as identified by the
    // ']' location) and the index expression are both from macro expansions
    // within a system header.
    if (ASE) {
      SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
          ASE->getRBracketLoc());
      if (SourceMgr.isInSystemHeader(RBracketLoc)) {
        SourceLocation IndexLoc =
            SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc());
        if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc))
          return;
      }
    }

    unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
                          : diag::warn_ptr_arith_exceeds_bounds;
    unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
    QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();

    DiagRuntimeBehavior(
        BaseExpr->getBeginLoc(), BaseExpr,
        PDiag(DiagID) << toString(index, 10, true) << ArrayTy->desugar()
                      << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
  } else {
    // Negative index: it precedes the array bounds.
    unsigned DiagID = diag::warn_array_index_precedes_bounds;
    if (!ASE) {
      DiagID = diag::warn_ptr_arith_precedes_bounds;
      if (index.isNegative()) index = -index;
    }

    DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
                        PDiag(DiagID) << toString(index, 10, true)
                                      << IndexExpr->getSourceRange());
  }

  const NamedDecl *ND = nullptr;
  // Try harder to find a NamedDecl to point at in the note.
  while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
    BaseExpr = ASE->getBase()->IgnoreParenCasts();
  if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
    ND = DRE->getDecl();
  if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
    ND = ME->getMemberDecl();

  if (ND)
    DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
                        PDiag(diag::note_array_declared_here) << ND);
}

/// Walk an expression looking for array subscripts (and OpenMP array
/// sections) to bounds-check, tracking how many address-of operators wrap
/// the access so that one-past-the-end addresses are permitted under '&'.
void Sema::CheckArrayAccess(const Expr *expr) {
  int AllowOnePastEnd = 0;
  while (expr) {
    expr = expr->IgnoreParenImpCasts();
    switch (expr->getStmtClass()) {
      case Stmt::ArraySubscriptExprClass: {
        const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
        CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
                         AllowOnePastEnd > 0);
        expr = ASE->getBase();
        break;
      }
      case Stmt::MemberExprClass: {
        expr = cast<MemberExpr>(expr)->getBase();
        break;
      }
      case Stmt::OMPArraySectionExprClass: {
        const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(expr);
        if (ASE->getLowerBound())
          CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(),
                           /*ASE=*/nullptr, AllowOnePastEnd > 0);
        return;
      }
      case Stmt::UnaryOperatorClass: {
        // Only unwrap the * and & unary operators
        const UnaryOperator *UO = cast<UnaryOperator>(expr);
        expr = UO->getSubExpr();
        switch (UO->getOpcode()) {
          case UO_AddrOf:
            AllowOnePastEnd++;
            break;
          case UO_Deref:
            AllowOnePastEnd--;
            break;
          default:
            return;
        }
        break;
      }
      case Stmt::ConditionalOperatorClass: {
        // Check both arms independently.
        const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
        if (const Expr *lhs = cond->getLHS())
          CheckArrayAccess(lhs);
        if (const Expr *rhs = cond->getRHS())
          CheckArrayAccess(rhs);
        return;
      }
      case Stmt::CXXOperatorCallExprClass: {
        const auto *OCE = cast<CXXOperatorCallExpr>(expr);
        for (const auto *Arg : OCE->arguments())
          CheckArrayAccess(Arg);
        return;
      }
      default:
        return;
    }
  }
}

//===--- CHECK: Objective-C retain cycles ----------------------------------//

namespace {

// Describes the strongly-owning variable on the receiver side of a potential
// retain cycle, and where to point the diagnostic note.
struct RetainCycleOwner {
  VarDecl *Variable = nullptr;
  SourceRange Range;
  SourceLocation Loc;
  bool Indirect = false;  // owned through an ivar/property, not directly

  RetainCycleOwner() = default;

  void setLocsFrom(Expr *e) {
    Loc = e->getExprLoc();
    Range = e->getSourceRange();
  }
};

} // namespace

/// Consider whether capturing the given variable can possibly lead to
/// a retain cycle.
static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
  // In ARC, it's captured strongly iff the variable has __strong
  // lifetime. In MRR, it's captured strongly if the variable is
  // __block and has an appropriate type.
  if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
    return false;

  owner.Variable = var;
  if (ref)
    owner.setLocsFrom(ref);
  return true;
}

/// Walk the receiver expression looking for a variable that strongly owns
/// it (directly, or through strong ivars / retaining properties). Returns
/// true and fills in \p owner on success.
static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
  while (true) {
    e = e->IgnoreParens();
    if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
      switch (cast->getCastKind()) {
      case CK_BitCast:
      case CK_LValueBitCast:
      case CK_LValueToRValue:
      case CK_ARCReclaimReturnedObject:
        // These casts don't change ownership; look through them.
        e = cast->getSubExpr();
        continue;

      default:
        return false;
      }
    }

    if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
      ObjCIvarDecl *ivar = ref->getDecl();
      if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
        return false;

      // Try to find a retain cycle in the base.
      if (!findRetainCycleOwner(S, ref->getBase(), owner))
        return false;

      if (ref->isFreeIvar()) owner.setLocsFrom(ref);
      owner.Indirect = true;
      return true;
    }

    if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
      VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
      if (!var) return false;
      return considerVariable(var, ref, owner);
    }

    if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
      if (member->isArrow()) return false;

      // Don't count this as an indirect ownership.
      e = member->getBase();
      continue;
    }

    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
      // Only pay attention to pseudo-objects on property references.
      ObjCPropertyRefExpr *pre
        = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
                                          ->IgnoreParens());
      if (!pre) return false;
      if (pre->isImplicitProperty()) return false;
      ObjCPropertyDecl *property = pre->getExplicitProperty();
      // The property must retain its value (or be backed by a strong ivar).
      if (!property->isRetaining() &&
          !(property->getPropertyIvarDecl() &&
            property->getPropertyIvarDecl()->getType()
              .getObjCLifetime() == Qualifiers::OCL_Strong))
          return false;

      owner.Indirect = true;
      if (pre->isSuperReceiver()) {
        owner.Variable = S.getCurMethodDecl()->getSelfDecl();
        if (!owner.Variable)
          return false;
        owner.Loc = pre->getLocation();
        owner.Range = pre->getSourceRange();
        return true;
      }
      e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
                              ->getSourceExpr());
      continue;
    }

    // Array ivars?

    return false;
  }
}

namespace {

  // Searches a block body for an expression that captures Variable, and
  // records whether the block reassigns the variable to zero/nil first
  // (which breaks the would-be cycle).
  struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
    ASTContext &Context;
    VarDecl *Variable;
    Expr *Capturer = nullptr;
    bool VarWillBeReased = false;

    FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
        : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
          Context(Context), Variable(variable) {}

    void VisitDeclRefExpr(DeclRefExpr *ref) {
      if (ref->getDecl() == Variable && !Capturer)
        Capturer = ref;
    }

    void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
      if (Capturer) return;
      Visit(ref->getBase());
      if (Capturer && ref->isFreeIvar())
        Capturer = ref;
    }

    void VisitBlockExpr(BlockExpr *block) {
      // Look inside nested blocks
      if (block->getBlockDecl()->capturesVariable(Variable))
        Visit(block->getBlockDecl()->getBody());
    }

    void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
      if (Capturer) return;
      if (OVE->getSourceExpr())
        Visit(OVE->getSourceExpr());
    }

    void VisitBinaryOperator(BinaryOperator *BinOp) {
      // Detect 'Variable = 0' / 'Variable = nil' inside the block.
      if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
        return;
      Expr *LHS = BinOp->getLHS();
      if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) {
        if (DRE->getDecl() != Variable)
          return;
        if (Expr *RHS = BinOp->getRHS()) {
          RHS = RHS->IgnoreParenCasts();
          std::optional<llvm::APSInt> Value;
          VarWillBeReased =
              (RHS && (Value = RHS->getIntegerConstantExpr(Context)) &&
               *Value == 0);
        }
      }
    }
  };

} // namespace

/// Check whether the given argument is a block which captures a
/// variable.
static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
  assert(owner.Variable && owner.Loc.isValid());

  e = e->IgnoreParenCasts();

  // Look through [^{...} copy] and Block_copy(^{...}).
  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) {
    Selector Cmd = ME->getSelector();
    if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") {
      e = ME->getInstanceReceiver();
      if (!e)
        return nullptr;
      e = e->IgnoreParenCasts();
    }
  } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) {
    if (CE->getNumArgs() == 1) {
      FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
      if (Fn) {
        const IdentifierInfo *FnI = Fn->getIdentifier();
        if (FnI && FnI->isStr("_Block_copy")) {
          e = CE->getArg(0)->IgnoreParenCasts();
        }
      }
    }
  }

  BlockExpr *block = dyn_cast<BlockExpr>(e);
  if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
    return nullptr;

  FindCaptureVisitor visitor(S.Context, owner.Variable);
  visitor.Visit(block->getBlockDecl()->getBody());
  // No cycle if the block clears the variable before it could matter.
  return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
}

/// Emit the retain-cycle warning plus a note pointing at the owner.
static void diagnoseRetainCycle(Sema &S, Expr *capturer,
                                RetainCycleOwner &owner) {
  assert(capturer);
  assert(owner.Variable && owner.Loc.isValid());

  S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
    << owner.Variable << capturer->getSourceRange();
  S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
    << owner.Indirect << owner.Range;
}

/// Check for a keyword selector that starts with the word 'add' or
/// 'set'.
static bool isSetterLikeSelector(Selector sel) {
  if (sel.isUnarySelector()) return false;

  StringRef str = sel.getNameForSlot(0);
  // Ignore any leading underscores.
  while (!str.empty() && str.front() == '_') str = str.substr(1);
  if (str.startswith("set"))
    str = str.substr(3);
  else if (str.startswith("add")) {
    // Specially allow 'addOperationWithBlock:'.
    if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
      return false;
    str = str.substr(3);
  }
  else
    return false;

  // 'set'/'add' must be the whole word or followed by a capital letter.
  if (str.empty()) return true;
  return !isLowercase(str.front());
}

/// If Message is a mutating NSMutableArray message, return the index of the
/// argument that gets stored into the collection; otherwise std::nullopt.
static std::optional<int>
GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
                                                Message->getReceiverInterface(),
                                                NSAPI::ClassId_NSMutableArray);
  if (!IsMutableArray) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSArrayMethodKind> MKOpt =
      S.NSAPIObj->getNSArrayMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSArrayMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableArr_addObject:
    case NSAPI::NSMutableArr_insertObjectAtIndex:
    case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
      return 0;
    case NSAPI::NSMutableArr_replaceObjectAtIndex:
      return 1;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}

/// If Message is a mutating NSMutableDictionary message, return the index of
/// the stored-object argument; otherwise std::nullopt.
static std::optional<int>
GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
  bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableDictionary);
  if (!IsMutableDictionary) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
      S.NSAPIObj->getNSDictionaryMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSDictionaryMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableDict_setObjectForKey:
    case NSAPI::NSMutableDict_setValueForKey:
    case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
      return 0;

    default:
      return std::nullopt;
  }

  return std::nullopt;
}

/// If Message is a mutating NSMutableSet / NSMutableOrderedSet message,
/// return the index of the stored-object argument; otherwise std::nullopt.
static std::optional<int> GetNSSetArgumentIndex(Sema &S,
                                                ObjCMessageExpr *Message) {
  bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableSet);

  bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
                                            Message->getReceiverInterface(),
                                            NSAPI::ClassId_NSMutableOrderedSet);
  if (!IsMutableSet && !IsMutableOrderedSet) {
    return std::nullopt;
  }

  Selector Sel = Message->getSelector();

  std::optional<NSAPI::NSSetMethodKind> MKOpt =
      S.NSAPIObj->getNSSetMethodKind(Sel);
  if (!MKOpt) {
    return std::nullopt;
  }

  NSAPI::NSSetMethodKind MK = *MKOpt;

  switch (MK) {
    case NSAPI::NSMutableSet_addObject:
    case NSAPI::NSOrderedSet_setObjectAtIndex:
    case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
    case NSAPI::NSOrderedSet_insertObjectAtIndex:
      return 0;
    case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
      return 1;
  }

  return std::nullopt;
}

/// Warn when a mutable Cocoa collection is asked to store itself (directly
/// or via the same variable/ivar as receiver and argument), which creates a
/// circular container.
void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
  if (!Message->isInstanceMessage()) {
    return;
  }

  std::optional<int> ArgOpt;

  if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) &&
      !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) {
    return;
  }

  int ArgIndex = *ArgOpt;

  Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts();
  if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) {
    Arg = OE->getSourceExpr()->IgnoreImpCasts();
  }

  if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
    // [super add...:self] stores the current object into itself.
    if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
      if (ArgRE->isObjCSelfExpr()) {
        Diag(Message->getSourceRange().getBegin(),
             diag::warn_objc_circular_container)
            << ArgRE->getDecl() << StringRef("'super'");
      }
    }
  } else {
    Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();

    if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) {
      Receiver = OE->getSourceExpr()->IgnoreImpCasts();
    }

    if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) {
      if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) {
        if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
          ValueDecl *Decl = ReceiverRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          if (!ArgRE->isObjCSelfExpr()) {
            Diag(Decl->getLocation(),
                 diag::note_objc_circular_container_declared_here)
                << Decl;
          }
        }
      }
    } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) {
      if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) {
        if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
          ObjCIvarDecl *Decl = IvarRE->getDecl();
          Diag(Message->getSourceRange().getBegin(),
               diag::warn_objc_circular_container)
              << Decl << Decl;
          Diag(Decl->getLocation(),
               diag::note_objc_circular_container_declared_here)
              << Decl;
        }
      }
    }
  }
}

/// Check a message send to see if it's likely to cause a retain cycle.
void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
  // Only check instance methods whose selector looks like a setter.
  if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
    return;

  // Try to find a variable that the receiver is strongly owned by.
  RetainCycleOwner owner;
  if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
    if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
      return;
  } else {
    assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
    owner.Variable = getCurMethodDecl()->getSelfDecl();
    owner.Loc = msg->getSuperLoc();
    owner.Range = msg->getSuperLoc();
  }

  // Check whether the receiver is captured by any of the arguments.
  const ObjCMethodDecl *MD = msg->getMethodDecl();
  for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
    if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) {
      // noescape blocks should not be retained by the method.
      if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
        continue;
      return diagnoseRetainCycle(*this, capturer, owner);
    }
  }
}

/// Check a property assign to see if it's likely to cause a retain cycle.
16878void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { 16879 RetainCycleOwner owner; 16880 if (!findRetainCycleOwner(*this, receiver, owner)) 16881 return; 16882 16883 if (Expr *capturer = findCapturingExpr(*this, argument, owner)) 16884 diagnoseRetainCycle(*this, capturer, owner); 16885} 16886 16887void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { 16888 RetainCycleOwner Owner; 16889 if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) 16890 return; 16891 16892 // Because we don't have an expression for the variable, we have to set the 16893 // location explicitly here. 16894 Owner.Loc = Var->getLocation(); 16895 Owner.Range = Var->getSourceRange(); 16896 16897 if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) 16898 diagnoseRetainCycle(*this, Capturer, Owner); 16899} 16900 16901static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, 16902 Expr *RHS, bool isProperty) { 16903 // Check if RHS is an Objective-C object literal, which also can get 16904 // immediately zapped in a weak reference. Note that we explicitly 16905 // allow ObjCStringLiterals, since those are designed to never really die. 16906 RHS = RHS->IgnoreParenImpCasts(); 16907 16908 // This enum needs to match with the 'select' in 16909 // warn_objc_arc_literal_assign (off-by-1). 16910 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); 16911 if (Kind == Sema::LK_String || Kind == Sema::LK_None) 16912 return false; 16913 16914 S.Diag(Loc, diag::warn_arc_literal_assign) 16915 << (unsigned) Kind 16916 << (isProperty ? 0 : 1) 16917 << RHS->getSourceRange(); 16918 16919 return true; 16920} 16921 16922static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, 16923 Qualifiers::ObjCLifetime LT, 16924 Expr *RHS, bool isProperty) { 16925 // Strip off any implicit cast added to get to the one ARC-specific. 
16926 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 16927 if (cast->getCastKind() == CK_ARCConsumeObject) { 16928 S.Diag(Loc, diag::warn_arc_retained_assign) 16929 << (LT == Qualifiers::OCL_ExplicitNone) 16930 << (isProperty ? 0 : 1) 16931 << RHS->getSourceRange(); 16932 return true; 16933 } 16934 RHS = cast->getSubExpr(); 16935 } 16936 16937 if (LT == Qualifiers::OCL_Weak && 16938 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) 16939 return true; 16940 16941 return false; 16942} 16943 16944bool Sema::checkUnsafeAssigns(SourceLocation Loc, 16945 QualType LHS, Expr *RHS) { 16946 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); 16947 16948 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) 16949 return false; 16950 16951 if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) 16952 return true; 16953 16954 return false; 16955} 16956 16957void Sema::checkUnsafeExprAssigns(SourceLocation Loc, 16958 Expr *LHS, Expr *RHS) { 16959 QualType LHSType; 16960 // PropertyRef on LHS type need be directly obtained from 16961 // its declaration as it has a PseudoType. 16962 ObjCPropertyRefExpr *PRE 16963 = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); 16964 if (PRE && !PRE->isImplicitProperty()) { 16965 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16966 if (PD) 16967 LHSType = PD->getType(); 16968 } 16969 16970 if (LHSType.isNull()) 16971 LHSType = LHS->getType(); 16972 16973 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); 16974 16975 if (LT == Qualifiers::OCL_Weak) { 16976 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) 16977 getCurFunction()->markSafeWeakUse(LHS); 16978 } 16979 16980 if (checkUnsafeAssigns(Loc, LHSType, RHS)) 16981 return; 16982 16983 // FIXME. Check for other life times. 
16984 if (LT != Qualifiers::OCL_None) 16985 return; 16986 16987 if (PRE) { 16988 if (PRE->isImplicitProperty()) 16989 return; 16990 const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); 16991 if (!PD) 16992 return; 16993 16994 unsigned Attributes = PD->getPropertyAttributes(); 16995 if (Attributes & ObjCPropertyAttribute::kind_assign) { 16996 // when 'assign' attribute was not explicitly specified 16997 // by user, ignore it and rely on property type itself 16998 // for lifetime info. 16999 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); 17000 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) && 17001 LHSType->isObjCRetainableType()) 17002 return; 17003 17004 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { 17005 if (cast->getCastKind() == CK_ARCConsumeObject) { 17006 Diag(Loc, diag::warn_arc_retained_property_assign) 17007 << RHS->getSourceRange(); 17008 return; 17009 } 17010 RHS = cast->getSubExpr(); 17011 } 17012 } else if (Attributes & ObjCPropertyAttribute::kind_weak) { 17013 if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) 17014 return; 17015 } 17016 } 17017} 17018 17019//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// 17020 17021static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, 17022 SourceLocation StmtLoc, 17023 const NullStmt *Body) { 17024 // Do not warn if the body is a macro that expands to nothing, e.g: 17025 // 17026 // #define CALL(x) 17027 // if (condition) 17028 // CALL(0); 17029 if (Body->hasLeadingEmptyMacro()) 17030 return false; 17031 17032 // Get line numbers of statement and body. 
17033 bool StmtLineInvalid; 17034 unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, 17035 &StmtLineInvalid); 17036 if (StmtLineInvalid) 17037 return false; 17038 17039 bool BodyLineInvalid; 17040 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), 17041 &BodyLineInvalid); 17042 if (BodyLineInvalid) 17043 return false; 17044 17045 // Warn if null statement and body are on the same line. 17046 if (StmtLine != BodyLine) 17047 return false; 17048 17049 return true; 17050} 17051 17052void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, 17053 const Stmt *Body, 17054 unsigned DiagID) { 17055 // Since this is a syntactic check, don't emit diagnostic for template 17056 // instantiations, this just adds noise. 17057 if (CurrentInstantiationScope) 17058 return; 17059 17060 // The body should be a null statement. 17061 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17062 if (!NBody) 17063 return; 17064 17065 // Do the usual checks. 17066 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17067 return; 17068 17069 Diag(NBody->getSemiLoc(), DiagID); 17070 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17071} 17072 17073void Sema::DiagnoseEmptyLoopBody(const Stmt *S, 17074 const Stmt *PossibleBody) { 17075 assert(!CurrentInstantiationScope); // Ensured by caller 17076 17077 SourceLocation StmtLoc; 17078 const Stmt *Body; 17079 unsigned DiagID; 17080 if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { 17081 StmtLoc = FS->getRParenLoc(); 17082 Body = FS->getBody(); 17083 DiagID = diag::warn_empty_for_body; 17084 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { 17085 StmtLoc = WS->getRParenLoc(); 17086 Body = WS->getBody(); 17087 DiagID = diag::warn_empty_while_body; 17088 } else 17089 return; // Neither `for' nor `while'. 17090 17091 // The body should be a null statement. 
17092 const NullStmt *NBody = dyn_cast<NullStmt>(Body); 17093 if (!NBody) 17094 return; 17095 17096 // Skip expensive checks if diagnostic is disabled. 17097 if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) 17098 return; 17099 17100 // Do the usual checks. 17101 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) 17102 return; 17103 17104 // `for(...);' and `while(...);' are popular idioms, so in order to keep 17105 // noise level low, emit diagnostics only if for/while is followed by a 17106 // CompoundStmt, e.g.: 17107 // for (int i = 0; i < n; i++); 17108 // { 17109 // a(i); 17110 // } 17111 // or if for/while is followed by a statement with more indentation 17112 // than for/while itself: 17113 // for (int i = 0; i < n; i++); 17114 // a(i); 17115 bool ProbableTypo = isa<CompoundStmt>(PossibleBody); 17116 if (!ProbableTypo) { 17117 bool BodyColInvalid; 17118 unsigned BodyCol = SourceMgr.getPresumedColumnNumber( 17119 PossibleBody->getBeginLoc(), &BodyColInvalid); 17120 if (BodyColInvalid) 17121 return; 17122 17123 bool StmtColInvalid; 17124 unsigned StmtCol = 17125 SourceMgr.getPresumedColumnNumber(S->getBeginLoc(), &StmtColInvalid); 17126 if (StmtColInvalid) 17127 return; 17128 17129 if (BodyCol > StmtCol) 17130 ProbableTypo = true; 17131 } 17132 17133 if (ProbableTypo) { 17134 Diag(NBody->getSemiLoc(), DiagID); 17135 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); 17136 } 17137} 17138 17139//===--- CHECK: Warn on self move with std::move. -------------------------===// 17140 17141/// DiagnoseSelfMove - Emits a warning if a value is moved to itself. 17142void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, 17143 SourceLocation OpLoc) { 17144 if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) 17145 return; 17146 17147 if (inTemplateInstantiation()) 17148 return; 17149 17150 // Strip parens and casts away. 
17151 LHSExpr = LHSExpr->IgnoreParenImpCasts(); 17152 RHSExpr = RHSExpr->IgnoreParenImpCasts(); 17153 17154 // Check for a call expression 17155 const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); 17156 if (!CE || CE->getNumArgs() != 1) 17157 return; 17158 17159 // Check for a call to std::move 17160 if (!CE->isCallToStdMove()) 17161 return; 17162 17163 // Get argument from std::move 17164 RHSExpr = CE->getArg(0); 17165 17166 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); 17167 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); 17168 17169 // Two DeclRefExpr's, check that the decls are the same. 17170 if (LHSDeclRef && RHSDeclRef) { 17171 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 17172 return; 17173 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 17174 RHSDeclRef->getDecl()->getCanonicalDecl()) 17175 return; 17176 17177 auto D = Diag(OpLoc, diag::warn_self_move) 17178 << LHSExpr->getType() << LHSExpr->getSourceRange() 17179 << RHSExpr->getSourceRange(); 17180 if (const FieldDecl *F = 17181 getSelfAssignmentClassMemberCandidate(RHSDeclRef->getDecl())) 17182 D << 1 << F 17183 << FixItHint::CreateInsertion(LHSDeclRef->getBeginLoc(), "this->"); 17184 else 17185 D << 0; 17186 return; 17187 } 17188 17189 // Member variables require a different approach to check for self moves. 17190 // MemberExpr's are the same if every nested MemberExpr refers to the same 17191 // Decl and that the base Expr's are DeclRefExpr's with the same Decl or 17192 // the base Expr's are CXXThisExpr's. 
17193 const Expr *LHSBase = LHSExpr; 17194 const Expr *RHSBase = RHSExpr; 17195 const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); 17196 const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); 17197 if (!LHSME || !RHSME) 17198 return; 17199 17200 while (LHSME && RHSME) { 17201 if (LHSME->getMemberDecl()->getCanonicalDecl() != 17202 RHSME->getMemberDecl()->getCanonicalDecl()) 17203 return; 17204 17205 LHSBase = LHSME->getBase(); 17206 RHSBase = RHSME->getBase(); 17207 LHSME = dyn_cast<MemberExpr>(LHSBase); 17208 RHSME = dyn_cast<MemberExpr>(RHSBase); 17209 } 17210 17211 LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); 17212 RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); 17213 if (LHSDeclRef && RHSDeclRef) { 17214 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) 17215 return; 17216 if (LHSDeclRef->getDecl()->getCanonicalDecl() != 17217 RHSDeclRef->getDecl()->getCanonicalDecl()) 17218 return; 17219 17220 Diag(OpLoc, diag::warn_self_move) 17221 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 17222 << RHSExpr->getSourceRange(); 17223 return; 17224 } 17225 17226 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) 17227 Diag(OpLoc, diag::warn_self_move) 17228 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange() 17229 << RHSExpr->getSourceRange(); 17230} 17231 17232//===--- Layout compatibility ----------------------------------------------// 17233 17234static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); 17235 17236/// Check if two enumeration types are layout-compatible. 17237static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { 17238 // C++11 [dcl.enum] p8: 17239 // Two enumeration types are layout-compatible if they have the same 17240 // underlying type. 17241 return ED1->isComplete() && ED2->isComplete() && 17242 C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); 17243} 17244 17245/// Check if two fields are layout-compatible. 
static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
                               FieldDecl *Field2) {
  // Field types must themselves be layout-compatible.
  if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
    return false;

  // Either both or neither must be bit-fields.
  if (Field1->isBitField() != Field2->isBitField())
    return false;

  if (Field1->isBitField()) {
    // Make sure that the bit-fields are the same length.
    unsigned Bits1 = Field1->getBitWidthValue(C);
    unsigned Bits2 = Field2->getBitWidthValue(C);

    if (Bits1 != Bits2)
      return false;
  }

  return true;
}

/// Check if two standard-layout structs are layout-compatible.
/// (C++11 [class.mem] p17)
static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
                                     RecordDecl *RD2) {
  // If both records are C++ classes, check that base classes match.
  if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) {
    // If one of records is a CXXRecordDecl we are in C++ mode,
    // thus the other one is a CXXRecordDecl, too.
    const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2);
    // Check number of base classes.
    if (D1CXX->getNumBases() != D2CXX->getNumBases())
      return false;

    // Check the base classes.
    for (CXXRecordDecl::base_class_const_iterator
               Base1 = D1CXX->bases_begin(),
           BaseEnd1 = D1CXX->bases_end(),
              Base2 = D2CXX->bases_begin();
         Base1 != BaseEnd1;
         ++Base1, ++Base2) {
      if (!isLayoutCompatible(C, Base1->getType(), Base2->getType()))
        return false;
    }
  } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) {
    // If only RD2 is a C++ class, it should have zero base classes.
    if (D2CXX->getNumBases() > 0)
      return false;
  }

  // Check the fields.  Both sequences must have the same length and be
  // pairwise layout-compatible.
  RecordDecl::field_iterator Field2 = RD2->field_begin(),
                             Field2End = RD2->field_end(),
                             Field1 = RD1->field_begin(),
                             Field1End = RD1->field_end();
  for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
    if (!isLayoutCompatible(C, *Field1, *Field2))
      return false;
  }
  if (Field1 != Field1End || Field2 != Field2End)
    return false;

  return true;
}

/// Check if two standard-layout unions are layout-compatible.
/// (C++11 [class.mem] p18)
static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
                                    RecordDecl *RD2) {
  // Unions are layout-compatible if their members can be matched up
  // pairwise (in any order) as layout-compatible fields.
  llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
  for (auto *Field2 : RD2->fields())
    UnmatchedFields.insert(Field2);

  for (auto *Field1 : RD1->fields()) {
    llvm::SmallPtrSet<FieldDecl *, 8>::iterator
        I = UnmatchedFields.begin(),
        E = UnmatchedFields.end();

    for ( ; I != E; ++I) {
      if (isLayoutCompatible(C, Field1, *I)) {
        bool Result = UnmatchedFields.erase(*I);
        (void) Result;
        assert(Result);
        break;
      }
    }
    if (I == E)
      return false;
  }

  return UnmatchedFields.empty();
}

/// Dispatch to the union or struct check depending on the records' kind.
static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
                               RecordDecl *RD2) {
  if (RD1->isUnion() != RD2->isUnion())
    return false;

  if (RD1->isUnion())
    return isLayoutCompatibleUnion(C, RD1, RD2);
  else
    return isLayoutCompatibleStruct(C, RD1, RD2);
}

/// Check if two types are layout-compatible in C++11 sense.
static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
  if (T1.isNull() || T2.isNull())
    return false;

  // C++11 [basic.types] p11:
  //   If two types T1 and T2 are the same type, then T1 and T2 are
  //   layout-compatible types.
  if (C.hasSameType(T1, T2))
    return true;

  // Compare unqualified canonical types.
  T1 = T1.getCanonicalType().getUnqualifiedType();
  T2 = T2.getCanonicalType().getUnqualifiedType();

  const Type::TypeClass TC1 = T1->getTypeClass();
  const Type::TypeClass TC2 = T2->getTypeClass();

  if (TC1 != TC2)
    return false;

  if (TC1 == Type::Enum) {
    return isLayoutCompatible(C,
                              cast<EnumType>(T1)->getDecl(),
                              cast<EnumType>(T2)->getDecl());
  } else if (TC1 == Type::Record) {
    // Only standard-layout records can be layout-compatible.
    if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
      return false;

    return isLayoutCompatible(C,
                              cast<RecordType>(T1)->getDecl(),
                              cast<RecordType>(T2)->getDecl());
  }

  return false;
}

//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//

/// Given a type tag expression find the type tag itself.
///
/// \param TypeExpr Type tag expression, as it appears in user's code.
///
/// \param VD Declaration of an identifier that appears in a type tag.
///
/// \param MagicValue Type tag magic value.
///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
17398static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, 17399 const ValueDecl **VD, uint64_t *MagicValue, 17400 bool isConstantEvaluated) { 17401 while(true) { 17402 if (!TypeExpr) 17403 return false; 17404 17405 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); 17406 17407 switch (TypeExpr->getStmtClass()) { 17408 case Stmt::UnaryOperatorClass: { 17409 const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); 17410 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { 17411 TypeExpr = UO->getSubExpr(); 17412 continue; 17413 } 17414 return false; 17415 } 17416 17417 case Stmt::DeclRefExprClass: { 17418 const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); 17419 *VD = DRE->getDecl(); 17420 return true; 17421 } 17422 17423 case Stmt::IntegerLiteralClass: { 17424 const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); 17425 llvm::APInt MagicValueAPInt = IL->getValue(); 17426 if (MagicValueAPInt.getActiveBits() <= 64) { 17427 *MagicValue = MagicValueAPInt.getZExtValue(); 17428 return true; 17429 } else 17430 return false; 17431 } 17432 17433 case Stmt::BinaryConditionalOperatorClass: 17434 case Stmt::ConditionalOperatorClass: { 17435 const AbstractConditionalOperator *ACO = 17436 cast<AbstractConditionalOperator>(TypeExpr); 17437 bool Result; 17438 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx, 17439 isConstantEvaluated)) { 17440 if (Result) 17441 TypeExpr = ACO->getTrueExpr(); 17442 else 17443 TypeExpr = ACO->getFalseExpr(); 17444 continue; 17445 } 17446 return false; 17447 } 17448 17449 case Stmt::BinaryOperatorClass: { 17450 const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); 17451 if (BO->getOpcode() == BO_Comma) { 17452 TypeExpr = BO->getRHS(); 17453 continue; 17454 } 17455 return false; 17456 } 17457 17458 default: 17459 return false; 17460 } 17461 } 17462} 17463 17464/// Retrieve the C type corresponding to type tag TypeExpr. 
17465/// 17466/// \param TypeExpr Expression that specifies a type tag. 17467/// 17468/// \param MagicValues Registered magic values. 17469/// 17470/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong 17471/// kind. 17472/// 17473/// \param TypeInfo Information about the corresponding C type. 17474/// 17475/// \param isConstantEvaluated whether the evalaution should be performed in 17476/// constant context. 17477/// 17478/// \returns true if the corresponding C type was found. 17479static bool GetMatchingCType( 17480 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, 17481 const ASTContext &Ctx, 17482 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> 17483 *MagicValues, 17484 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo, 17485 bool isConstantEvaluated) { 17486 FoundWrongKind = false; 17487 17488 // Variable declaration that has type_tag_for_datatype attribute. 17489 const ValueDecl *VD = nullptr; 17490 17491 uint64_t MagicValue; 17492 17493 if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue, isConstantEvaluated)) 17494 return false; 17495 17496 if (VD) { 17497 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { 17498 if (I->getArgumentKind() != ArgumentKind) { 17499 FoundWrongKind = true; 17500 return false; 17501 } 17502 TypeInfo.Type = I->getMatchingCType(); 17503 TypeInfo.LayoutCompatible = I->getLayoutCompatible(); 17504 TypeInfo.MustBeNull = I->getMustBeNull(); 17505 return true; 17506 } 17507 return false; 17508 } 17509 17510 if (!MagicValues) 17511 return false; 17512 17513 llvm::DenseMap<Sema::TypeTagMagicValue, 17514 Sema::TypeTagData>::const_iterator I = 17515 MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); 17516 if (I == MagicValues->end()) 17517 return false; 17518 17519 TypeInfo = I->second; 17520 return true; 17521} 17522 17523void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, 17524 uint64_t MagicValue, QualType Type, 17525 bool 
LayoutCompatible, 17526 bool MustBeNull) { 17527 if (!TypeTagForDatatypeMagicValues) 17528 TypeTagForDatatypeMagicValues.reset( 17529 new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); 17530 17531 TypeTagMagicValue Magic(ArgumentKind, MagicValue); 17532 (*TypeTagForDatatypeMagicValues)[Magic] = 17533 TypeTagData(Type, LayoutCompatible, MustBeNull); 17534} 17535 17536static bool IsSameCharType(QualType T1, QualType T2) { 17537 const BuiltinType *BT1 = T1->getAs<BuiltinType>(); 17538 if (!BT1) 17539 return false; 17540 17541 const BuiltinType *BT2 = T2->getAs<BuiltinType>(); 17542 if (!BT2) 17543 return false; 17544 17545 BuiltinType::Kind T1Kind = BT1->getKind(); 17546 BuiltinType::Kind T2Kind = BT2->getKind(); 17547 17548 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || 17549 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || 17550 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || 17551 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); 17552} 17553 17554void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, 17555 const ArrayRef<const Expr *> ExprArgs, 17556 SourceLocation CallSiteLoc) { 17557 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); 17558 bool IsPointerAttr = Attr->getIsPointer(); 17559 17560 // Retrieve the argument representing the 'type_tag'. 
17561 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex(); 17562 if (TypeTagIdxAST >= ExprArgs.size()) { 17563 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17564 << 0 << Attr->getTypeTagIdx().getSourceIndex(); 17565 return; 17566 } 17567 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST]; 17568 bool FoundWrongKind; 17569 TypeTagData TypeInfo; 17570 if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, 17571 TypeTagForDatatypeMagicValues.get(), FoundWrongKind, 17572 TypeInfo, isConstantEvaluated())) { 17573 if (FoundWrongKind) 17574 Diag(TypeTagExpr->getExprLoc(), 17575 diag::warn_type_tag_for_datatype_wrong_kind) 17576 << TypeTagExpr->getSourceRange(); 17577 return; 17578 } 17579 17580 // Retrieve the argument representing the 'arg_idx'. 17581 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex(); 17582 if (ArgumentIdxAST >= ExprArgs.size()) { 17583 Diag(CallSiteLoc, diag::err_tag_index_out_of_range) 17584 << 1 << Attr->getArgumentIdx().getSourceIndex(); 17585 return; 17586 } 17587 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST]; 17588 if (IsPointerAttr) { 17589 // Skip implicit cast of pointer to `void *' (as a function argument). 17590 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) 17591 if (ICE->getType()->isVoidPointerType() && 17592 ICE->getCastKind() == CK_BitCast) 17593 ArgumentExpr = ICE->getSubExpr(); 17594 } 17595 QualType ArgumentType = ArgumentExpr->getType(); 17596 17597 // Passing a `void*' pointer shouldn't trigger a warning. 17598 if (IsPointerAttr && ArgumentType->isVoidPointerType()) 17599 return; 17600 17601 if (TypeInfo.MustBeNull) { 17602 // Type tag with matching void type requires a null pointer. 
17603 if (!ArgumentExpr->isNullPointerConstant(Context, 17604 Expr::NPC_ValueDependentIsNotNull)) { 17605 Diag(ArgumentExpr->getExprLoc(), 17606 diag::warn_type_safety_null_pointer_required) 17607 << ArgumentKind->getName() 17608 << ArgumentExpr->getSourceRange() 17609 << TypeTagExpr->getSourceRange(); 17610 } 17611 return; 17612 } 17613 17614 QualType RequiredType = TypeInfo.Type; 17615 if (IsPointerAttr) 17616 RequiredType = Context.getPointerType(RequiredType); 17617 17618 bool mismatch = false; 17619 if (!TypeInfo.LayoutCompatible) { 17620 mismatch = !Context.hasSameType(ArgumentType, RequiredType); 17621 17622 // C++11 [basic.fundamental] p1: 17623 // Plain char, signed char, and unsigned char are three distinct types. 17624 // 17625 // But we treat plain `char' as equivalent to `signed char' or `unsigned 17626 // char' depending on the current char signedness mode. 17627 if (mismatch) 17628 if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), 17629 RequiredType->getPointeeType())) || 17630 (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) 17631 mismatch = false; 17632 } else 17633 if (IsPointerAttr) 17634 mismatch = !isLayoutCompatible(Context, 17635 ArgumentType->getPointeeType(), 17636 RequiredType->getPointeeType()); 17637 else 17638 mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); 17639 17640 if (mismatch) 17641 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) 17642 << ArgumentType << ArgumentKind 17643 << TypeInfo.LayoutCompatible << RequiredType 17644 << ArgumentExpr->getSourceRange() 17645 << TypeTagExpr->getSourceRange(); 17646} 17647 17648void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, 17649 CharUnits Alignment) { 17650 MisalignedMembers.emplace_back(E, RD, MD, Alignment); 17651} 17652 17653void Sema::DiagnoseMisalignedMembers() { 17654 for (MisalignedMember &m : MisalignedMembers) { 17655 const NamedDecl *ND = m.RD; 17656 if 
(ND->getName().empty()) { 17657 if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl()) 17658 ND = TD; 17659 } 17660 Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member) 17661 << m.MD << ND << m.E->getSourceRange(); 17662 } 17663 MisalignedMembers.clear(); 17664} 17665 17666void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) { 17667 E = E->IgnoreParens(); 17668 if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType()) 17669 return; 17670 if (isa<UnaryOperator>(E) && 17671 cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) { 17672 auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens(); 17673 if (isa<MemberExpr>(Op)) { 17674 auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op)); 17675 if (MA != MisalignedMembers.end() && 17676 (T->isDependentType() || T->isIntegerType() || 17677 (T->isPointerType() && (T->getPointeeType()->isIncompleteType() || 17678 Context.getTypeAlignInChars( 17679 T->getPointeeType()) <= MA->Alignment)))) 17680 MisalignedMembers.erase(MA); 17681 } 17682 } 17683} 17684 17685void Sema::RefersToMemberWithReducedAlignment( 17686 Expr *E, 17687 llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> 17688 Action) { 17689 const auto *ME = dyn_cast<MemberExpr>(E); 17690 if (!ME) 17691 return; 17692 17693 // No need to check expressions with an __unaligned-qualified type. 17694 if (E->getType().getQualifiers().hasUnaligned()) 17695 return; 17696 17697 // For a chain of MemberExpr like "a.b.c.d" this list 17698 // will keep FieldDecl's like [d, c, b]. 
17699 SmallVector<FieldDecl *, 4> ReverseMemberChain; 17700 const MemberExpr *TopME = nullptr; 17701 bool AnyIsPacked = false; 17702 do { 17703 QualType BaseType = ME->getBase()->getType(); 17704 if (BaseType->isDependentType()) 17705 return; 17706 if (ME->isArrow()) 17707 BaseType = BaseType->getPointeeType(); 17708 RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl(); 17709 if (RD->isInvalidDecl()) 17710 return; 17711 17712 ValueDecl *MD = ME->getMemberDecl(); 17713 auto *FD = dyn_cast<FieldDecl>(MD); 17714 // We do not care about non-data members. 17715 if (!FD || FD->isInvalidDecl()) 17716 return; 17717 17718 AnyIsPacked = 17719 AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>()); 17720 ReverseMemberChain.push_back(FD); 17721 17722 TopME = ME; 17723 ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens()); 17724 } while (ME); 17725 assert(TopME && "We did not compute a topmost MemberExpr!"); 17726 17727 // Not the scope of this diagnostic. 17728 if (!AnyIsPacked) 17729 return; 17730 17731 const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts(); 17732 const auto *DRE = dyn_cast<DeclRefExpr>(TopBase); 17733 // TODO: The innermost base of the member expression may be too complicated. 17734 // For now, just disregard these cases. This is left for future 17735 // improvement. 17736 if (!DRE && !isa<CXXThisExpr>(TopBase)) 17737 return; 17738 17739 // Alignment expected by the whole expression. 17740 CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType()); 17741 17742 // No need to do anything else with this case. 17743 if (ExpectedAlignment.isOne()) 17744 return; 17745 17746 // Synthesize offset of the whole access. 17747 CharUnits Offset; 17748 for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain)) 17749 Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD)); 17750 17751 // Compute the CompleteObjectAlignment as the alignment of the whole chain. 
17752 CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars( 17753 ReverseMemberChain.back()->getParent()->getTypeForDecl()); 17754 17755 // The base expression of the innermost MemberExpr may give 17756 // stronger guarantees than the class containing the member. 17757 if (DRE && !TopME->isArrow()) { 17758 const ValueDecl *VD = DRE->getDecl(); 17759 if (!VD->getType()->isReferenceType()) 17760 CompleteObjectAlignment = 17761 std::max(CompleteObjectAlignment, Context.getDeclAlign(VD)); 17762 } 17763 17764 // Check if the synthesized offset fulfills the alignment. 17765 if (Offset % ExpectedAlignment != 0 || 17766 // It may fulfill the offset it but the effective alignment may still be 17767 // lower than the expected expression alignment. 17768 CompleteObjectAlignment < ExpectedAlignment) { 17769 // If this happens, we want to determine a sensible culprit of this. 17770 // Intuitively, watching the chain of member expressions from right to 17771 // left, we start with the required alignment (as required by the field 17772 // type) but some packed attribute in that chain has reduced the alignment. 17773 // It may happen that another packed structure increases it again. But if 17774 // we are here such increase has not been enough. So pointing the first 17775 // FieldDecl that either is packed or else its RecordDecl is, 17776 // seems reasonable. 
17777 FieldDecl *FD = nullptr; 17778 CharUnits Alignment; 17779 for (FieldDecl *FDI : ReverseMemberChain) { 17780 if (FDI->hasAttr<PackedAttr>() || 17781 FDI->getParent()->hasAttr<PackedAttr>()) { 17782 FD = FDI; 17783 Alignment = std::min( 17784 Context.getTypeAlignInChars(FD->getType()), 17785 Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl())); 17786 break; 17787 } 17788 } 17789 assert(FD && "We did not find a packed FieldDecl!"); 17790 Action(E, FD->getParent(), FD, Alignment); 17791 } 17792} 17793 17794void Sema::CheckAddressOfPackedMember(Expr *rhs) { 17795 using namespace std::placeholders; 17796 17797 RefersToMemberWithReducedAlignment( 17798 rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1, 17799 _2, _3, _4)); 17800} 17801 17802bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) { 17803 if (checkArgCount(*this, TheCall, 1)) 17804 return true; 17805 17806 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17807 if (A.isInvalid()) 17808 return true; 17809 17810 TheCall->setArg(0, A.get()); 17811 QualType TyA = A.get()->getType(); 17812 17813 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17814 return true; 17815 17816 TheCall->setType(TyA); 17817 return false; 17818} 17819 17820bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) { 17821 if (checkArgCount(*this, TheCall, 2)) 17822 return true; 17823 17824 ExprResult A = TheCall->getArg(0); 17825 ExprResult B = TheCall->getArg(1); 17826 // Do standard promotions between the two arguments, returning their common 17827 // type. 
17828 QualType Res = 17829 UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison); 17830 if (A.isInvalid() || B.isInvalid()) 17831 return true; 17832 17833 QualType TyA = A.get()->getType(); 17834 QualType TyB = B.get()->getType(); 17835 17836 if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType()) 17837 return Diag(A.get()->getBeginLoc(), 17838 diag::err_typecheck_call_different_arg_types) 17839 << TyA << TyB; 17840 17841 if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA)) 17842 return true; 17843 17844 TheCall->setArg(0, A.get()); 17845 TheCall->setArg(1, B.get()); 17846 TheCall->setType(Res); 17847 return false; 17848} 17849 17850bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) { 17851 if (checkArgCount(*this, TheCall, 1)) 17852 return true; 17853 17854 ExprResult A = UsualUnaryConversions(TheCall->getArg(0)); 17855 if (A.isInvalid()) 17856 return true; 17857 17858 TheCall->setArg(0, A.get()); 17859 return false; 17860} 17861 17862ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall, 17863 ExprResult CallResult) { 17864 if (checkArgCount(*this, TheCall, 1)) 17865 return ExprError(); 17866 17867 ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0)); 17868 if (MatrixArg.isInvalid()) 17869 return MatrixArg; 17870 Expr *Matrix = MatrixArg.get(); 17871 17872 auto *MType = Matrix->getType()->getAs<ConstantMatrixType>(); 17873 if (!MType) { 17874 Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17875 << 1 << /* matrix ty*/ 1 << Matrix->getType(); 17876 return ExprError(); 17877 } 17878 17879 // Create returned matrix type by swapping rows and columns of the argument 17880 // matrix type. 17881 QualType ResultType = Context.getConstantMatrixType( 17882 MType->getElementType(), MType->getNumColumns(), MType->getNumRows()); 17883 17884 // Change the return type to the type of the returned matrix. 
17885 TheCall->setType(ResultType); 17886 17887 // Update call argument to use the possibly converted matrix argument. 17888 TheCall->setArg(0, Matrix); 17889 return CallResult; 17890} 17891 17892// Get and verify the matrix dimensions. 17893static std::optional<unsigned> 17894getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) { 17895 SourceLocation ErrorPos; 17896 std::optional<llvm::APSInt> Value = 17897 Expr->getIntegerConstantExpr(S.Context, &ErrorPos); 17898 if (!Value) { 17899 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg) 17900 << Name; 17901 return {}; 17902 } 17903 uint64_t Dim = Value->getZExtValue(); 17904 if (!ConstantMatrixType::isDimensionValid(Dim)) { 17905 S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension) 17906 << Name << ConstantMatrixType::getMaxElementsPerDimension(); 17907 return {}; 17908 } 17909 return Dim; 17910} 17911 17912ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, 17913 ExprResult CallResult) { 17914 if (!getLangOpts().MatrixTypes) { 17915 Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled); 17916 return ExprError(); 17917 } 17918 17919 if (checkArgCount(*this, TheCall, 4)) 17920 return ExprError(); 17921 17922 unsigned PtrArgIdx = 0; 17923 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 17924 Expr *RowsExpr = TheCall->getArg(1); 17925 Expr *ColumnsExpr = TheCall->getArg(2); 17926 Expr *StrideExpr = TheCall->getArg(3); 17927 17928 bool ArgError = false; 17929 17930 // Check pointer argument. 
17931 { 17932 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 17933 if (PtrConv.isInvalid()) 17934 return PtrConv; 17935 PtrExpr = PtrConv.get(); 17936 TheCall->setArg(0, PtrExpr); 17937 if (PtrExpr->isTypeDependent()) { 17938 TheCall->setType(Context.DependentTy); 17939 return TheCall; 17940 } 17941 } 17942 17943 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 17944 QualType ElementTy; 17945 if (!PtrTy) { 17946 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17947 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 17948 ArgError = true; 17949 } else { 17950 ElementTy = PtrTy->getPointeeType().getUnqualifiedType(); 17951 17952 if (!ConstantMatrixType::isValidElementType(ElementTy)) { 17953 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 17954 << PtrArgIdx + 1 << /* pointer to element ty*/ 2 17955 << PtrExpr->getType(); 17956 ArgError = true; 17957 } 17958 } 17959 17960 // Apply default Lvalue conversions and convert the expression to size_t. 17961 auto ApplyArgumentConversions = [this](Expr *E) { 17962 ExprResult Conv = DefaultLvalueConversion(E); 17963 if (Conv.isInvalid()) 17964 return Conv; 17965 17966 return tryConvertExprToType(Conv.get(), Context.getSizeType()); 17967 }; 17968 17969 // Apply conversion to row and column expressions. 17970 ExprResult RowsConv = ApplyArgumentConversions(RowsExpr); 17971 if (!RowsConv.isInvalid()) { 17972 RowsExpr = RowsConv.get(); 17973 TheCall->setArg(1, RowsExpr); 17974 } else 17975 RowsExpr = nullptr; 17976 17977 ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr); 17978 if (!ColumnsConv.isInvalid()) { 17979 ColumnsExpr = ColumnsConv.get(); 17980 TheCall->setArg(2, ColumnsExpr); 17981 } else 17982 ColumnsExpr = nullptr; 17983 17984 // If any part of the result matrix type is still pending, just use 17985 // Context.DependentTy, until all parts are resolved. 
17986 if ((RowsExpr && RowsExpr->isTypeDependent()) || 17987 (ColumnsExpr && ColumnsExpr->isTypeDependent())) { 17988 TheCall->setType(Context.DependentTy); 17989 return CallResult; 17990 } 17991 17992 // Check row and column dimensions. 17993 std::optional<unsigned> MaybeRows; 17994 if (RowsExpr) 17995 MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this); 17996 17997 std::optional<unsigned> MaybeColumns; 17998 if (ColumnsExpr) 17999 MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this); 18000 18001 // Check stride argument. 18002 ExprResult StrideConv = ApplyArgumentConversions(StrideExpr); 18003 if (StrideConv.isInvalid()) 18004 return ExprError(); 18005 StrideExpr = StrideConv.get(); 18006 TheCall->setArg(3, StrideExpr); 18007 18008 if (MaybeRows) { 18009 if (std::optional<llvm::APSInt> Value = 18010 StrideExpr->getIntegerConstantExpr(Context)) { 18011 uint64_t Stride = Value->getZExtValue(); 18012 if (Stride < *MaybeRows) { 18013 Diag(StrideExpr->getBeginLoc(), 18014 diag::err_builtin_matrix_stride_too_small); 18015 ArgError = true; 18016 } 18017 } 18018 } 18019 18020 if (ArgError || !MaybeRows || !MaybeColumns) 18021 return ExprError(); 18022 18023 TheCall->setType( 18024 Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns)); 18025 return CallResult; 18026} 18027 18028ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, 18029 ExprResult CallResult) { 18030 if (checkArgCount(*this, TheCall, 3)) 18031 return ExprError(); 18032 18033 unsigned PtrArgIdx = 1; 18034 Expr *MatrixExpr = TheCall->getArg(0); 18035 Expr *PtrExpr = TheCall->getArg(PtrArgIdx); 18036 Expr *StrideExpr = TheCall->getArg(2); 18037 18038 bool ArgError = false; 18039 18040 { 18041 ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr); 18042 if (MatrixConv.isInvalid()) 18043 return MatrixConv; 18044 MatrixExpr = MatrixConv.get(); 18045 TheCall->setArg(0, MatrixExpr); 18046 } 18047 if (MatrixExpr->isTypeDependent()) { 18048 
TheCall->setType(Context.DependentTy); 18049 return TheCall; 18050 } 18051 18052 auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>(); 18053 if (!MatrixTy) { 18054 Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18055 << 1 << /*matrix ty */ 1 << MatrixExpr->getType(); 18056 ArgError = true; 18057 } 18058 18059 { 18060 ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr); 18061 if (PtrConv.isInvalid()) 18062 return PtrConv; 18063 PtrExpr = PtrConv.get(); 18064 TheCall->setArg(1, PtrExpr); 18065 if (PtrExpr->isTypeDependent()) { 18066 TheCall->setType(Context.DependentTy); 18067 return TheCall; 18068 } 18069 } 18070 18071 // Check pointer argument. 18072 auto *PtrTy = PtrExpr->getType()->getAs<PointerType>(); 18073 if (!PtrTy) { 18074 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type) 18075 << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType(); 18076 ArgError = true; 18077 } else { 18078 QualType ElementTy = PtrTy->getPointeeType(); 18079 if (ElementTy.isConstQualified()) { 18080 Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const); 18081 ArgError = true; 18082 } 18083 ElementTy = ElementTy.getUnqualifiedType().getCanonicalType(); 18084 if (MatrixTy && 18085 !Context.hasSameType(ElementTy, MatrixTy->getElementType())) { 18086 Diag(PtrExpr->getBeginLoc(), 18087 diag::err_builtin_matrix_pointer_arg_mismatch) 18088 << ElementTy << MatrixTy->getElementType(); 18089 ArgError = true; 18090 } 18091 } 18092 18093 // Apply default Lvalue conversions and convert the stride expression to 18094 // size_t. 
18095 { 18096 ExprResult StrideConv = DefaultLvalueConversion(StrideExpr); 18097 if (StrideConv.isInvalid()) 18098 return StrideConv; 18099 18100 StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType()); 18101 if (StrideConv.isInvalid()) 18102 return StrideConv; 18103 StrideExpr = StrideConv.get(); 18104 TheCall->setArg(2, StrideExpr); 18105 } 18106 18107 // Check stride argument. 18108 if (MatrixTy) { 18109 if (std::optional<llvm::APSInt> Value = 18110 StrideExpr->getIntegerConstantExpr(Context)) { 18111 uint64_t Stride = Value->getZExtValue(); 18112 if (Stride < MatrixTy->getNumRows()) { 18113 Diag(StrideExpr->getBeginLoc(), 18114 diag::err_builtin_matrix_stride_too_small); 18115 ArgError = true; 18116 } 18117 } 18118 } 18119 18120 if (ArgError) 18121 return ExprError(); 18122 18123 return CallResult; 18124} 18125 18126/// \brief Enforce the bounds of a TCB 18127/// CheckTCBEnforcement - Enforces that every function in a named TCB only 18128/// directly calls other functions in the same TCB as marked by the enforce_tcb 18129/// and enforce_tcb_leaf attributes. 18130void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc, 18131 const NamedDecl *Callee) { 18132 const NamedDecl *Caller = getCurFunctionOrMethodDecl(); 18133 18134 if (!Caller || !Caller->hasAttr<EnforceTCBAttr>()) 18135 return; 18136 18137 // Search through the enforce_tcb and enforce_tcb_leaf attributes to find 18138 // all TCBs the callee is a part of. 18139 llvm::StringSet<> CalleeTCBs; 18140 for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>()) 18141 CalleeTCBs.insert(A->getTCBName()); 18142 for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>()) 18143 CalleeTCBs.insert(A->getTCBName()); 18144 18145 // Go through the TCBs the caller is a part of and emit warnings if Caller 18146 // is in a TCB that the Callee is not. 
18147 for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) { 18148 StringRef CallerTCB = A->getTCBName(); 18149 if (CalleeTCBs.count(CallerTCB) == 0) { 18150 this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation) 18151 << Callee << CallerTCB; 18152 } 18153 } 18154} 18155