1//===- SemaChecking.cpp - Extra Semantic Checking -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements extra semantic analysis beyond what is enforced
10// by the C type system.
11//
12//===----------------------------------------------------------------------===//
13
14#include "clang/AST/APValue.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/AttrIterator.h"
18#include "clang/AST/CharUnits.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclBase.h"
21#include "clang/AST/DeclCXX.h"
22#include "clang/AST/DeclObjC.h"
23#include "clang/AST/DeclarationName.h"
24#include "clang/AST/EvaluatedExprVisitor.h"
25#include "clang/AST/Expr.h"
26#include "clang/AST/ExprCXX.h"
27#include "clang/AST/ExprObjC.h"
28#include "clang/AST/ExprOpenMP.h"
29#include "clang/AST/FormatString.h"
30#include "clang/AST/NSAPI.h"
31#include "clang/AST/NonTrivialTypeVisitor.h"
32#include "clang/AST/OperationKinds.h"
33#include "clang/AST/RecordLayout.h"
34#include "clang/AST/Stmt.h"
35#include "clang/AST/TemplateBase.h"
36#include "clang/AST/Type.h"
37#include "clang/AST/TypeLoc.h"
38#include "clang/AST/UnresolvedSet.h"
39#include "clang/Basic/AddressSpaces.h"
40#include "clang/Basic/CharInfo.h"
41#include "clang/Basic/Diagnostic.h"
42#include "clang/Basic/IdentifierTable.h"
43#include "clang/Basic/LLVM.h"
44#include "clang/Basic/LangOptions.h"
45#include "clang/Basic/OpenCLOptions.h"
46#include "clang/Basic/OperatorKinds.h"
47#include "clang/Basic/PartialDiagnostic.h"
48#include "clang/Basic/SourceLocation.h"
49#include "clang/Basic/SourceManager.h"
50#include "clang/Basic/Specifiers.h"
51#include "clang/Basic/SyncScope.h"
52#include "clang/Basic/TargetBuiltins.h"
53#include "clang/Basic/TargetCXXABI.h"
54#include "clang/Basic/TargetInfo.h"
55#include "clang/Basic/TypeTraits.h"
56#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
57#include "clang/Sema/Initialization.h"
58#include "clang/Sema/Lookup.h"
59#include "clang/Sema/Ownership.h"
60#include "clang/Sema/Scope.h"
61#include "clang/Sema/ScopeInfo.h"
62#include "clang/Sema/Sema.h"
63#include "clang/Sema/SemaInternal.h"
64#include "llvm/ADT/APFloat.h"
65#include "llvm/ADT/APInt.h"
66#include "llvm/ADT/APSInt.h"
67#include "llvm/ADT/ArrayRef.h"
68#include "llvm/ADT/DenseMap.h"
69#include "llvm/ADT/FoldingSet.h"
70#include "llvm/ADT/STLExtras.h"
71#include "llvm/ADT/SmallBitVector.h"
72#include "llvm/ADT/SmallPtrSet.h"
73#include "llvm/ADT/SmallString.h"
74#include "llvm/ADT/SmallVector.h"
75#include "llvm/ADT/StringExtras.h"
76#include "llvm/ADT/StringRef.h"
77#include "llvm/ADT/StringSet.h"
78#include "llvm/ADT/StringSwitch.h"
79#include "llvm/Support/AtomicOrdering.h"
80#include "llvm/Support/Casting.h"
81#include "llvm/Support/Compiler.h"
82#include "llvm/Support/ConvertUTF.h"
83#include "llvm/Support/ErrorHandling.h"
84#include "llvm/Support/Format.h"
85#include "llvm/Support/Locale.h"
86#include "llvm/Support/MathExtras.h"
87#include "llvm/Support/SaveAndRestore.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/TargetParser/RISCVTargetParser.h"
90#include "llvm/TargetParser/Triple.h"
91#include <algorithm>
92#include <bitset>
93#include <cassert>
94#include <cctype>
95#include <cstddef>
96#include <cstdint>
97#include <functional>
98#include <limits>
99#include <optional>
100#include <string>
101#include <tuple>
102#include <utility>
103
104using namespace clang;
105using namespace sema;
106
SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}
112
113static constexpr unsigned short combineFAPK(Sema::FormatArgumentPassingKind A,
114 Sema::FormatArgumentPassingKind B) {
115 return (A << 8) | B;
116}
117
118/// Checks that a call expression's argument count is at least the desired
119/// number. This is useful when doing custom type-checking on a variadic
120/// function. Returns true on error.
121static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
122 unsigned MinArgCount) {
123 unsigned ArgCount = Call->getNumArgs();
124 if (ArgCount >= MinArgCount)
125 return false;
126
127 return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
128 << 0 /*function call*/ << MinArgCount << ArgCount
129 << /*is non object*/ 0 << Call->getSourceRange();
130}
131
132/// Checks that a call expression's argument count is at most the desired
133/// number. This is useful when doing custom type-checking on a variadic
134/// function. Returns true on error.
135static bool checkArgCountAtMost(Sema &S, CallExpr *Call, unsigned MaxArgCount) {
136 unsigned ArgCount = Call->getNumArgs();
137 if (ArgCount <= MaxArgCount)
138 return false;
139 return S.Diag(Call->getEndLoc(),
140 diag::err_typecheck_call_too_many_args_at_most)
141 << 0 /*function call*/ << MaxArgCount << ArgCount
142 << /*is non object*/ 0 << Call->getSourceRange();
143}
144
/// Checks that a call expression's argument count is in the desired range.
/// This is useful when doing custom type-checking on a variadic function.
/// Returns true on error.
148static bool checkArgCountRange(Sema &S, CallExpr *Call, unsigned MinArgCount,
149 unsigned MaxArgCount) {
150 return checkArgCountAtLeast(S, Call, MinArgCount) ||
151 checkArgCountAtMost(S, Call, MaxArgCount);
152}
153
154/// Checks that a call expression's argument count is the desired number.
155/// This is useful when doing custom type-checking. Returns true on error.
156static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
157 unsigned ArgCount = Call->getNumArgs();
158 if (ArgCount == DesiredArgCount)
159 return false;
160
161 if (checkArgCountAtLeast(S, Call, MinArgCount: DesiredArgCount))
162 return true;
163 assert(ArgCount > DesiredArgCount && "should have diagnosed this");
164
165 // Highlight all the excess arguments.
166 SourceRange Range(Call->getArg(Arg: DesiredArgCount)->getBeginLoc(),
167 Call->getArg(Arg: ArgCount - 1)->getEndLoc());
168
169 return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
170 << 0 /*function call*/ << DesiredArgCount << ArgCount
171 << /*is non object*/ 0 << Call->getArg(1)->getSourceRange();
172}
173
static bool convertArgumentToType(Sema &S, Expr *&Value, QualType Ty) {
  if (Value->isTypeDependent())
    return false;

  InitializedEntity Entity =
      InitializedEntity::InitializeParameter(S.Context, Ty, false);
  ExprResult Result =
      S.PerformCopyInitialization(Entity, SourceLocation(), Value);
  if (Result.isInvalid())
    return true;
  Value = Result.get();
  return false;
}
187
188/// Check that the first argument to __builtin_annotation is an integer
189/// and the second argument is a non-wide string literal.
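/// Illustrative (hypothetical) use accepted by this check:
///   int Tracked = __builtin_annotation(Value, "my.annotation");
/// A wide or otherwise non-ordinary string in the second position is rejected,
/// and the call's result type becomes the type of the first argument.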
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getBeginLoc(), diag::err_builtin_annotation_first_arg)
        << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isOrdinary()) {
    S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
        << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}
215
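/// Checks the MSVC __annotation intrinsic, which takes one or more wide string
/// literals, e.g. (illustrative): __annotation(L"category", L"message");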
216static bool SemaBuiltinMSVCAnnotation(Sema &S, CallExpr *TheCall) {
217 // We need at least one argument.
218 if (TheCall->getNumArgs() < 1) {
219 S.Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
220 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
221 << TheCall->getCallee()->getSourceRange();
222 return true;
223 }
224
225 // All arguments should be wide string literals.
226 for (Expr *Arg : TheCall->arguments()) {
227 auto *Literal = dyn_cast<StringLiteral>(Val: Arg->IgnoreParenCasts());
228 if (!Literal || !Literal->isWide()) {
229 S.Diag(Arg->getBeginLoc(), diag::err_msvc_annotation_wide_str)
230 << Arg->getSourceRange();
231 return true;
232 }
233 }
234
235 return false;
236}
237
238/// Check that the argument to __builtin_addressof is a glvalue, and set the
239/// result type to the corresponding pointer type.
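/// For example (illustrative), `__builtin_addressof(Obj)` yields `&Obj` even
/// if Obj's class overloads unary `operator&`.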
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getBeginLoc());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}
253
254/// Check that the argument to __builtin_function_start is a function.
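/// Illustrative (hypothetical) use: given `void F();`, the call
/// `__builtin_function_start(F)` is accepted, while passing anything that is
/// not a function is diagnosed.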
static bool SemaBuiltinFunctionStart(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(0));
  if (Arg.isInvalid())
    return true;

  TheCall->setArg(0, Arg.get());
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(
      Arg.get()->getAsBuiltinConstantDeclRef(S.getASTContext()));

  if (!FD) {
    S.Diag(TheCall->getBeginLoc(), diag::err_function_start_invalid_type)
        << TheCall->getSourceRange();
    return true;
  }

  return !S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
                                              TheCall->getBeginLoc());
}
276
277/// Check the number of arguments and set the result type to
278/// the argument type.
static bool SemaBuiltinPreserveAI(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  TheCall->setType(TheCall->getArg(0)->getType());
  return false;
}
286
/// Check that the value argument for __builtin_is_aligned(value, alignment)
/// and __builtin_align_{up,down}(value, alignment) is an integer or a pointer
/// type (but not a function pointer) and that the alignment is a power-of-two.
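/// Illustrative (hypothetical) uses accepted by this check:
///   bool Ok = __builtin_is_aligned(Ptr, 16);   // result type is bool
///   Ptr = __builtin_align_down(Ptr, 16);       // result has Ptr's type
///   size_t N = __builtin_align_up(Len, 8);     // integer values work too
/// An alignment that is not a positive power of two, or that is too large for
/// the value's type, is rejected.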
290static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
291 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 2))
292 return true;
293
294 clang::Expr *Source = TheCall->getArg(Arg: 0);
295 bool IsBooleanAlignBuiltin = ID == Builtin::BI__builtin_is_aligned;
296
297 auto IsValidIntegerType = [](QualType Ty) {
298 return Ty->isIntegerType() && !Ty->isEnumeralType() && !Ty->isBooleanType();
299 };
300 QualType SrcTy = Source->getType();
301 // We should also be able to use it with arrays (but not functions!).
302 if (SrcTy->canDecayToPointerType() && SrcTy->isArrayType()) {
303 SrcTy = S.Context.getDecayedType(T: SrcTy);
304 }
305 if ((!SrcTy->isPointerType() && !IsValidIntegerType(SrcTy)) ||
306 SrcTy->isFunctionPointerType()) {
307 // FIXME: this is not quite the right error message since we don't allow
308 // floating point types, or member pointers.
309 S.Diag(Source->getExprLoc(), diag::err_typecheck_expect_scalar_operand)
310 << SrcTy;
311 return true;
312 }
313
314 clang::Expr *AlignOp = TheCall->getArg(Arg: 1);
315 if (!IsValidIntegerType(AlignOp->getType())) {
316 S.Diag(AlignOp->getExprLoc(), diag::err_typecheck_expect_int)
317 << AlignOp->getType();
318 return true;
319 }
320 Expr::EvalResult AlignResult;
321 unsigned MaxAlignmentBits = S.Context.getIntWidth(T: SrcTy) - 1;
322 // We can't check validity of alignment if it is value dependent.
323 if (!AlignOp->isValueDependent() &&
324 AlignOp->EvaluateAsInt(Result&: AlignResult, Ctx: S.Context,
325 AllowSideEffects: Expr::SE_AllowSideEffects)) {
326 llvm::APSInt AlignValue = AlignResult.Val.getInt();
327 llvm::APSInt MaxValue(
328 llvm::APInt::getOneBitSet(numBits: MaxAlignmentBits + 1, BitNo: MaxAlignmentBits));
329 if (AlignValue < 1) {
330 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_small) << 1;
331 return true;
332 }
333 if (llvm::APSInt::compareValues(I1: AlignValue, I2: MaxValue) > 0) {
334 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
335 << toString(MaxValue, 10);
336 return true;
337 }
338 if (!AlignValue.isPowerOf2()) {
339 S.Diag(AlignOp->getExprLoc(), diag::err_alignment_not_power_of_two);
340 return true;
341 }
342 if (AlignValue == 1) {
343 S.Diag(AlignOp->getExprLoc(), diag::warn_alignment_builtin_useless)
344 << IsBooleanAlignBuiltin;
345 }
346 }
347
348 ExprResult SrcArg = S.PerformCopyInitialization(
349 Entity: InitializedEntity::InitializeParameter(Context&: S.Context, Type: SrcTy, Consumed: false),
350 EqualLoc: SourceLocation(), Init: Source);
351 if (SrcArg.isInvalid())
352 return true;
353 TheCall->setArg(Arg: 0, ArgExpr: SrcArg.get());
354 ExprResult AlignArg =
355 S.PerformCopyInitialization(Entity: InitializedEntity::InitializeParameter(
356 Context&: S.Context, Type: AlignOp->getType(), Consumed: false),
357 EqualLoc: SourceLocation(), Init: AlignOp);
358 if (AlignArg.isInvalid())
359 return true;
360 TheCall->setArg(Arg: 1, ArgExpr: AlignArg.get());
361 // For align_up/align_down, the return type is the same as the (potentially
362 // decayed) argument type including qualifiers. For is_aligned(), the result
363 // is always bool.
364 TheCall->setType(IsBooleanAlignBuiltin ? S.Context.BoolTy : SrcTy);
365 return false;
366}
367
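/// Checks __builtin_{add,sub,mul}_overflow and, when reached through the C23
/// <stdckdint.h> macros, ckd_{add,sub,mul}. Illustrative (hypothetical) use:
///   int Res;
///   if (__builtin_add_overflow(A, B, &Res)) { /* handle overflow */ }
/// The ckd_* spellings additionally reject plain char, bool, bit-precise, and
/// enumeration operands.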
368static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
369 unsigned BuiltinID) {
370 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 3))
371 return true;
372
373 std::pair<unsigned, const char *> Builtins[] = {
374 { Builtin::BI__builtin_add_overflow, "ckd_add" },
375 { Builtin::BI__builtin_sub_overflow, "ckd_sub" },
376 { Builtin::BI__builtin_mul_overflow, "ckd_mul" },
377 };
378
379 bool CkdOperation = llvm::any_of(Range&: Builtins, P: [&](const std::pair<unsigned,
380 const char *> &P) {
381 return BuiltinID == P.first && TheCall->getExprLoc().isMacroID() &&
382 Lexer::getImmediateMacroName(TheCall->getExprLoc(),
383 S.getSourceManager(), S.getLangOpts()) == P.second;
384 });
385
386 auto ValidCkdIntType = [](QualType QT) {
387 // A valid checked integer type is an integer type other than a plain char,
388 // bool, a bit-precise type, or an enumeration type.
389 if (const auto *BT = QT.getCanonicalType()->getAs<BuiltinType>())
390 return (BT->getKind() >= BuiltinType::Short &&
391 BT->getKind() <= BuiltinType::Int128) || (
392 BT->getKind() >= BuiltinType::UShort &&
393 BT->getKind() <= BuiltinType::UInt128) ||
394 BT->getKind() == BuiltinType::UChar ||
395 BT->getKind() == BuiltinType::SChar;
396 return false;
397 };
398
399 // First two arguments should be integers.
400 for (unsigned I = 0; I < 2; ++I) {
401 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: I));
402 if (Arg.isInvalid()) return true;
403 TheCall->setArg(Arg: I, ArgExpr: Arg.get());
404
405 QualType Ty = Arg.get()->getType();
406 bool IsValid = CkdOperation ? ValidCkdIntType(Ty) : Ty->isIntegerType();
407 if (!IsValid) {
408 S.Diag(Arg.get()->getBeginLoc(), diag::err_overflow_builtin_must_be_int)
409 << CkdOperation << Ty << Arg.get()->getSourceRange();
410 return true;
411 }
412 }
413
414 // Third argument should be a pointer to a non-const integer.
415 // IRGen correctly handles volatile, restrict, and address spaces, and
416 // the other qualifiers aren't possible.
417 {
418 ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 2));
419 if (Arg.isInvalid()) return true;
420 TheCall->setArg(Arg: 2, ArgExpr: Arg.get());
421
422 QualType Ty = Arg.get()->getType();
423 const auto *PtrTy = Ty->getAs<PointerType>();
424 if (!PtrTy ||
425 !PtrTy->getPointeeType()->isIntegerType() ||
426 (!ValidCkdIntType(PtrTy->getPointeeType()) && CkdOperation) ||
427 PtrTy->getPointeeType().isConstQualified()) {
428 S.Diag(Arg.get()->getBeginLoc(),
429 diag::err_overflow_builtin_must_be_ptr_int)
430 << CkdOperation << Ty << Arg.get()->getSourceRange();
431 return true;
432 }
433 }
434
  // Disallow signed bit-precise integer arguments larger than 128 bits for the
  // mul overflow builtin until we improve backend support.
437 if (BuiltinID == Builtin::BI__builtin_mul_overflow) {
438 for (unsigned I = 0; I < 3; ++I) {
439 const auto Arg = TheCall->getArg(Arg: I);
440 // Third argument will be a pointer.
441 auto Ty = I < 2 ? Arg->getType() : Arg->getType()->getPointeeType();
442 if (Ty->isBitIntType() && Ty->isSignedIntegerType() &&
443 S.getASTContext().getIntWidth(Ty) > 128)
444 return S.Diag(Arg->getBeginLoc(),
445 diag::err_overflow_builtin_bit_int_max_size)
446 << 128;
447 }
448 }
449
450 return false;
451}
452
453namespace {
454struct BuiltinDumpStructGenerator {
455 Sema &S;
456 CallExpr *TheCall;
457 SourceLocation Loc = TheCall->getBeginLoc();
458 SmallVector<Expr *, 32> Actions;
459 DiagnosticErrorTrap ErrorTracker;
460 PrintingPolicy Policy;
461
462 BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
463 : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
464 Policy(S.Context.getPrintingPolicy()) {
465 Policy.AnonymousTagLocations = false;
466 }
467
468 Expr *makeOpaqueValueExpr(Expr *Inner) {
469 auto *OVE = new (S.Context)
470 OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
471 Inner->getObjectKind(), Inner);
472 Actions.push_back(OVE);
473 return OVE;
474 }
475
476 Expr *getStringLiteral(llvm::StringRef Str) {
477 Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Key: Str);
478 // Wrap the literal in parentheses to attach a source location.
479 return new (S.Context) ParenExpr(Loc, Loc, Lit);
480 }
481
482 bool callPrintFunction(llvm::StringRef Format,
483 llvm::ArrayRef<Expr *> Exprs = {}) {
484 SmallVector<Expr *, 8> Args;
485 assert(TheCall->getNumArgs() >= 2);
486 Args.reserve(N: (TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
487 Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
488 Args.push_back(Elt: getStringLiteral(Str: Format));
489 Args.insert(I: Args.end(), From: Exprs.begin(), To: Exprs.end());
490
491 // Register a note to explain why we're performing the call.
492 Sema::CodeSynthesisContext Ctx;
493 Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
494 Ctx.PointOfInstantiation = Loc;
495 Ctx.CallArgs = Args.data();
496 Ctx.NumCallArgs = Args.size();
497 S.pushCodeSynthesisContext(Ctx);
498
499 ExprResult RealCall =
500 S.BuildCallExpr(/*Scope=*/S: nullptr, Fn: TheCall->getArg(Arg: 1),
501 LParenLoc: TheCall->getBeginLoc(), ArgExprs: Args, RParenLoc: TheCall->getRParenLoc());
502
503 S.popCodeSynthesisContext();
504 if (!RealCall.isInvalid())
505 Actions.push_back(Elt: RealCall.get());
506 // Bail out if we've hit any errors, even if we managed to build the
507 // call. We don't want to produce more than one error.
508 return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
509 }
510
511 Expr *getIndentString(unsigned Depth) {
512 if (!Depth)
513 return nullptr;
514
515 llvm::SmallString<32> Indent;
516 Indent.resize(N: Depth * Policy.Indentation, NV: ' ');
517 return getStringLiteral(Str: Indent);
518 }
519
520 Expr *getTypeString(QualType T) {
521 return getStringLiteral(Str: T.getAsString(Policy));
522 }
523
524 bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
525 llvm::raw_svector_ostream OS(Str);
526
527 // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
528 // than trying to print a single character.
529 if (auto *BT = T->getAs<BuiltinType>()) {
530 switch (BT->getKind()) {
531 case BuiltinType::Bool:
532 OS << "%d";
533 return true;
534 case BuiltinType::Char_U:
535 case BuiltinType::UChar:
536 OS << "%hhu";
537 return true;
538 case BuiltinType::Char_S:
539 case BuiltinType::SChar:
540 OS << "%hhd";
541 return true;
542 default:
543 break;
544 }
545 }
546
547 analyze_printf::PrintfSpecifier Specifier;
548 if (Specifier.fixType(QT: T, LangOpt: S.getLangOpts(), Ctx&: S.Context, /*IsObjCLiteral=*/false)) {
549 // We were able to guess how to format this.
550 if (Specifier.getConversionSpecifier().getKind() ==
551 analyze_printf::PrintfConversionSpecifier::sArg) {
552 // Wrap double-quotes around a '%s' specifier and limit its maximum
553 // length. Ideally we'd also somehow escape special characters in the
554 // contents but printf doesn't support that.
555 // FIXME: '%s' formatting is not safe in general.
556 OS << '"';
557 Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
558 Specifier.toString(os&: OS);
559 OS << '"';
560 // FIXME: It would be nice to include a '...' if the string doesn't fit
561 // in the length limit.
562 } else {
563 Specifier.toString(os&: OS);
564 }
565 return true;
566 }
567
568 if (T->isPointerType()) {
569 // Format all pointers with '%p'.
570 OS << "%p";
571 return true;
572 }
573
574 return false;
575 }
576
577 bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
578 Expr *IndentLit = getIndentString(Depth);
579 Expr *TypeLit = getTypeString(T: S.Context.getRecordType(Decl: RD));
580 if (IndentLit ? callPrintFunction(Format: "%s%s", Exprs: {IndentLit, TypeLit})
581 : callPrintFunction(Format: "%s", Exprs: {TypeLit}))
582 return true;
583
584 return dumpRecordValue(RD, E, RecordIndent: IndentLit, Depth);
585 }
586
587 // Dump a record value. E should be a pointer or lvalue referring to an RD.
588 bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
589 unsigned Depth) {
590 // FIXME: Decide what to do if RD is a union. At least we should probably
591 // turn off printing `const char*` members with `%s`, because that is very
592 // likely to crash if that's not the active member. Whatever we decide, we
593 // should document it.
594
595 // Build an OpaqueValueExpr so we can refer to E more than once without
596 // triggering re-evaluation.
597 Expr *RecordArg = makeOpaqueValueExpr(Inner: E);
598 bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
599
600 if (callPrintFunction(Format: " {\n"))
601 return true;
602
603 // Dump each base class, regardless of whether they're aggregates.
604 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(Val: RD)) {
605 for (const auto &Base : CXXRD->bases()) {
606 QualType BaseType =
607 RecordArgIsPtr ? S.Context.getPointerType(T: Base.getType())
608 : S.Context.getLValueReferenceType(T: Base.getType());
609 ExprResult BasePtr = S.BuildCStyleCastExpr(
610 LParenLoc: Loc, Ty: S.Context.getTrivialTypeSourceInfo(T: BaseType, Loc), RParenLoc: Loc,
611 Op: RecordArg);
612 if (BasePtr.isInvalid() ||
613 dumpUnnamedRecord(RD: Base.getType()->getAsRecordDecl(), E: BasePtr.get(),
614 Depth: Depth + 1))
615 return true;
616 }
617 }
618
619 Expr *FieldIndentArg = getIndentString(Depth: Depth + 1);
620
621 // Dump each field.
622 for (auto *D : RD->decls()) {
623 auto *IFD = dyn_cast<IndirectFieldDecl>(D);
624 auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
625 if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
626 continue;
627
628 llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
629 llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
630 getTypeString(FD->getType()),
631 getStringLiteral(FD->getName())};
632
633 if (FD->isBitField()) {
634 Format += ": %zu ";
635 QualType SizeT = S.Context.getSizeType();
636 llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
637 FD->getBitWidthValue(S.Context));
638 Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
639 }
640
641 Format += "=";
642
643 ExprResult Field =
644 IFD ? S.BuildAnonymousStructUnionMemberReference(
645 CXXScopeSpec(), Loc, IFD,
646 DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
647 : S.BuildFieldReferenceExpr(
648 RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
649 DeclAccessPair::make(FD, AS_public),
650 DeclarationNameInfo(FD->getDeclName(), Loc));
651 if (Field.isInvalid())
652 return true;
653
654 auto *InnerRD = FD->getType()->getAsRecordDecl();
655 auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
656 if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
657 // Recursively print the values of members of aggregate record type.
658 if (callPrintFunction(Format, Args) ||
659 dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
660 return true;
661 } else {
662 Format += " ";
663 if (appendFormatSpecifier(FD->getType(), Format)) {
664 // We know how to print this field.
665 Args.push_back(Field.get());
666 } else {
667 // We don't know how to print this field. Print out its address
668 // with a format specifier that a smart tool will be able to
669 // recognize and treat specially.
670 Format += "*%p";
671 ExprResult FieldAddr =
672 S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
673 if (FieldAddr.isInvalid())
674 return true;
675 Args.push_back(FieldAddr.get());
676 }
677 Format += "\n";
678 if (callPrintFunction(Format, Args))
679 return true;
680 }
681 }
682
683 return RecordIndent ? callPrintFunction(Format: "%s}\n", Exprs: RecordIndent)
684 : callPrintFunction(Format: "}\n");
685 }
686
687 Expr *buildWrapper() {
688 auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
689 PseudoObjectExpr::NoResult);
690 TheCall->setType(Wrapper->getType());
691 TheCall->setValueKind(Wrapper->getValueKind());
692 return Wrapper;
693 }
694};
695} // namespace
696
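/// Expands a __builtin_dump_struct call into a sequence of calls to a
/// printf-like callable. Illustrative (hypothetical) use:
///   struct S { int I; float F; } Val;
///   __builtin_dump_struct(&Val, printf);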
697static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
698 if (checkArgCountAtLeast(S, Call: TheCall, MinArgCount: 2))
699 return ExprError();
700
701 ExprResult PtrArgResult = S.DefaultLvalueConversion(E: TheCall->getArg(Arg: 0));
702 if (PtrArgResult.isInvalid())
703 return ExprError();
704 TheCall->setArg(Arg: 0, ArgExpr: PtrArgResult.get());
705
706 // First argument should be a pointer to a struct.
707 QualType PtrArgType = PtrArgResult.get()->getType();
708 if (!PtrArgType->isPointerType() ||
709 !PtrArgType->getPointeeType()->isRecordType()) {
710 S.Diag(PtrArgResult.get()->getBeginLoc(),
711 diag::err_expected_struct_pointer_argument)
712 << 1 << TheCall->getDirectCallee() << PtrArgType;
713 return ExprError();
714 }
715 QualType Pointee = PtrArgType->getPointeeType();
716 const RecordDecl *RD = Pointee->getAsRecordDecl();
717 // Try to instantiate the class template as appropriate; otherwise, access to
718 // its data() may lead to a crash.
719 if (S.RequireCompleteType(PtrArgResult.get()->getBeginLoc(), Pointee,
720 diag::err_incomplete_type))
721 return ExprError();
722 // Second argument is a callable, but we can't fully validate it until we try
723 // calling it.
724 QualType FnArgType = TheCall->getArg(Arg: 1)->getType();
725 if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
726 !FnArgType->isBlockPointerType() &&
727 !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
728 auto *BT = FnArgType->getAs<BuiltinType>();
729 switch (BT ? BT->getKind() : BuiltinType::Void) {
730 case BuiltinType::Dependent:
731 case BuiltinType::Overload:
732 case BuiltinType::BoundMember:
733 case BuiltinType::PseudoObject:
734 case BuiltinType::UnknownAny:
735 case BuiltinType::BuiltinFn:
736 // This might be a callable.
737 break;
738
739 default:
740 S.Diag(TheCall->getArg(1)->getBeginLoc(),
741 diag::err_expected_callable_argument)
742 << 2 << TheCall->getDirectCallee() << FnArgType;
743 return ExprError();
744 }
745 }
746
747 BuiltinDumpStructGenerator Generator(S, TheCall);
748
749 // Wrap parentheses around the given pointer. This is not necessary for
750 // correct code generation, but it means that when we pretty-print the call
751 // arguments in our diagnostics we will produce '(&s)->n' instead of the
752 // incorrect '&s->n'.
753 Expr *PtrArg = PtrArgResult.get();
754 PtrArg = new (S.Context)
755 ParenExpr(PtrArg->getBeginLoc(),
756 S.getLocForEndOfToken(Loc: PtrArg->getEndLoc()), PtrArg);
757 if (Generator.dumpUnnamedRecord(RD, E: PtrArg, Depth: 0))
758 return ExprError();
759
760 return Generator.buildWrapper();
761}
762
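/// Checks __builtin_call_with_static_chain(Call, Chain). Illustrative
/// (hypothetical) use:
///   int R = __builtin_call_with_static_chain(Callee(1, 2), ChainPtr);
/// The first argument must be a plain (non-block, non-builtin) call and the
/// second must evaluate to a pointer.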
763static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
764 if (checkArgCount(S, Call: BuiltinCall, DesiredArgCount: 2))
765 return true;
766
767 SourceLocation BuiltinLoc = BuiltinCall->getBeginLoc();
768 Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
769 Expr *Call = BuiltinCall->getArg(Arg: 0);
770 Expr *Chain = BuiltinCall->getArg(Arg: 1);
771
772 if (Call->getStmtClass() != Stmt::CallExprClass) {
773 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
774 << Call->getSourceRange();
775 return true;
776 }
777
778 auto CE = cast<CallExpr>(Val: Call);
779 if (CE->getCallee()->getType()->isBlockPointerType()) {
780 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
781 << Call->getSourceRange();
782 return true;
783 }
784
785 const Decl *TargetDecl = CE->getCalleeDecl();
786 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Val: TargetDecl))
787 if (FD->getBuiltinID()) {
788 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
789 << Call->getSourceRange();
790 return true;
791 }
792
793 if (isa<CXXPseudoDestructorExpr>(Val: CE->getCallee()->IgnoreParens())) {
794 S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
795 << Call->getSourceRange();
796 return true;
797 }
798
799 ExprResult ChainResult = S.UsualUnaryConversions(E: Chain);
800 if (ChainResult.isInvalid())
801 return true;
802 if (!ChainResult.get()->getType()->isPointerType()) {
803 S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
804 << Chain->getSourceRange();
805 return true;
806 }
807
808 QualType ReturnTy = CE->getCallReturnType(Ctx: S.Context);
809 QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
810 QualType BuiltinTy = S.Context.getFunctionType(
811 ResultTy: ReturnTy, Args: ArgTys, EPI: FunctionProtoType::ExtProtoInfo());
812 QualType BuiltinPtrTy = S.Context.getPointerType(T: BuiltinTy);
813
814 Builtin =
815 S.ImpCastExprToType(E: Builtin, Type: BuiltinPtrTy, CK: CK_BuiltinFnToFnPtr).get();
816
817 BuiltinCall->setType(CE->getType());
818 BuiltinCall->setValueKind(CE->getValueKind());
819 BuiltinCall->setObjectKind(CE->getObjectKind());
820 BuiltinCall->setCallee(Builtin);
821 BuiltinCall->setArg(Arg: 1, ArgExpr: ChainResult.get());
822
823 return false;
824}
825
826namespace {
827
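// Flags scanf-family calls whose constant field width cannot fit in the
// destination buffer. Illustrative (hypothetical) code that is diagnosed:
//   char Name[8];
//   sscanf(In, "%9s", Name);  // width 9 plus the NUL terminator > sizeof(Name)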
828class ScanfDiagnosticFormatHandler
829 : public analyze_format_string::FormatStringHandler {
830 // Accepts the argument index (relative to the first destination index) of the
831 // argument whose size we want.
832 using ComputeSizeFunction =
833 llvm::function_ref<std::optional<llvm::APSInt>(unsigned)>;
834
  // Accepts the argument index (relative to the first destination index), the
  // destination size, and the source size.
837 using DiagnoseFunction =
838 llvm::function_ref<void(unsigned, unsigned, unsigned)>;
839
840 ComputeSizeFunction ComputeSizeArgument;
841 DiagnoseFunction Diagnose;
842
843public:
844 ScanfDiagnosticFormatHandler(ComputeSizeFunction ComputeSizeArgument,
845 DiagnoseFunction Diagnose)
846 : ComputeSizeArgument(ComputeSizeArgument), Diagnose(Diagnose) {}
847
848 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
849 const char *StartSpecifier,
850 unsigned specifierLen) override {
851 if (!FS.consumesDataArgument())
852 return true;
853
854 unsigned NulByte = 0;
855 switch ((FS.getConversionSpecifier().getKind())) {
856 default:
857 return true;
858 case analyze_format_string::ConversionSpecifier::sArg:
859 case analyze_format_string::ConversionSpecifier::ScanListArg:
860 NulByte = 1;
861 break;
862 case analyze_format_string::ConversionSpecifier::cArg:
863 break;
864 }
865
866 analyze_format_string::OptionalAmount FW = FS.getFieldWidth();
867 if (FW.getHowSpecified() !=
868 analyze_format_string::OptionalAmount::HowSpecified::Constant)
869 return true;
870
871 unsigned SourceSize = FW.getConstantAmount() + NulByte;
872
873 std::optional<llvm::APSInt> DestSizeAPS =
874 ComputeSizeArgument(FS.getArgIndex());
875 if (!DestSizeAPS)
876 return true;
877
878 unsigned DestSize = DestSizeAPS->getZExtValue();
879
880 if (DestSize < SourceSize)
881 Diagnose(FS.getArgIndex(), DestSize, SourceSize);
882
883 return true;
884 }
885};
886
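// Computes a lower bound on the number of bytes a printf-family call writes.
// For example (illustrative), for the format "%5d:%s" the estimate is
// 5 (field width) + 1 (':') + 0 ("%s" with no width) + 1 (trailing NUL) = 7.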
887class EstimateSizeFormatHandler
888 : public analyze_format_string::FormatStringHandler {
889 size_t Size;
890 /// Whether the format string contains Linux kernel's format specifier
891 /// extension.
892 bool IsKernelCompatible = true;
893
894public:
895 EstimateSizeFormatHandler(StringRef Format)
896 : Size(std::min(a: Format.find(C: 0), b: Format.size()) +
897 1 /* null byte always written by sprintf */) {}
898
899 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
900 const char *, unsigned SpecifierLen,
901 const TargetInfo &) override {
902
903 const size_t FieldWidth = computeFieldWidth(FS);
904 const size_t Precision = computePrecision(FS);
905
906 // The actual format.
907 switch (FS.getConversionSpecifier().getKind()) {
908 // Just a char.
909 case analyze_format_string::ConversionSpecifier::cArg:
910 case analyze_format_string::ConversionSpecifier::CArg:
911 Size += std::max(a: FieldWidth, b: (size_t)1);
912 break;
913 // Just an integer.
914 case analyze_format_string::ConversionSpecifier::dArg:
915 case analyze_format_string::ConversionSpecifier::DArg:
916 case analyze_format_string::ConversionSpecifier::iArg:
917 case analyze_format_string::ConversionSpecifier::oArg:
918 case analyze_format_string::ConversionSpecifier::OArg:
919 case analyze_format_string::ConversionSpecifier::uArg:
920 case analyze_format_string::ConversionSpecifier::UArg:
921 case analyze_format_string::ConversionSpecifier::xArg:
922 case analyze_format_string::ConversionSpecifier::XArg:
923 Size += std::max(a: FieldWidth, b: Precision);
924 break;
925
    // A %g style conversion switches between %f and %e style dynamically.
    // %g removes trailing zeros and does not print a decimal point if no
    // digits follow it. Thus %g can print a single digit.
929 // FIXME: If it is alternative form:
930 // For g and G conversions, trailing zeros are not removed from the result.
931 case analyze_format_string::ConversionSpecifier::gArg:
932 case analyze_format_string::ConversionSpecifier::GArg:
933 Size += 1;
934 break;
935
936 // Floating point number in the form '[+]ddd.ddd'.
937 case analyze_format_string::ConversionSpecifier::fArg:
938 case analyze_format_string::ConversionSpecifier::FArg:
939 Size += std::max(a: FieldWidth, b: 1 /* integer part */ +
940 (Precision ? 1 + Precision
941 : 0) /* period + decimal */);
942 break;
943
944 // Floating point number in the form '[-]d.ddde[+-]dd'.
945 case analyze_format_string::ConversionSpecifier::eArg:
946 case analyze_format_string::ConversionSpecifier::EArg:
947 Size +=
948 std::max(a: FieldWidth,
949 b: 1 /* integer part */ +
950 (Precision ? 1 + Precision : 0) /* period + decimal */ +
951 1 /* e or E letter */ + 2 /* exponent */);
952 break;
953
954 // Floating point number in the form '[-]0xh.hhhhp±dd'.
955 case analyze_format_string::ConversionSpecifier::aArg:
956 case analyze_format_string::ConversionSpecifier::AArg:
957 Size +=
958 std::max(a: FieldWidth,
959 b: 2 /* 0x */ + 1 /* integer part */ +
960 (Precision ? 1 + Precision : 0) /* period + decimal */ +
961 1 /* p or P letter */ + 1 /* + or - */ + 1 /* value */);
962 break;
963
964 // Just a string.
965 case analyze_format_string::ConversionSpecifier::sArg:
966 case analyze_format_string::ConversionSpecifier::SArg:
967 Size += FieldWidth;
968 break;
969
970 // Just a pointer in the form '0xddd'.
971 case analyze_format_string::ConversionSpecifier::pArg:
      // The Linux kernel has its own extension for the `%p` specifier.
      // Kernel documentation:
      // https://docs.kernel.org/core-api/printk-formats.html#pointer-types
975 IsKernelCompatible = false;
976 Size += std::max(a: FieldWidth, b: 2 /* leading 0x */ + Precision);
977 break;
978
979 // A plain percent.
980 case analyze_format_string::ConversionSpecifier::PercentArg:
981 Size += 1;
982 break;
983
984 default:
985 break;
986 }
987
988 Size += FS.hasPlusPrefix() || FS.hasSpacePrefix();
989
990 if (FS.hasAlternativeForm()) {
991 switch (FS.getConversionSpecifier().getKind()) {
992 // For o conversion, it increases the precision, if and only if necessary,
993 // to force the first digit of the result to be a zero
994 // (if the value and precision are both 0, a single 0 is printed)
995 case analyze_format_string::ConversionSpecifier::oArg:
996 // For b conversion, a nonzero result has 0b prefixed to it.
997 case analyze_format_string::ConversionSpecifier::bArg:
998 // For x (or X) conversion, a nonzero result has 0x (or 0X) prefixed to
999 // it.
1000 case analyze_format_string::ConversionSpecifier::xArg:
1001 case analyze_format_string::ConversionSpecifier::XArg:
      // Note: even when the prefix is added, if
      // (prefix_width <= FieldWidth - formatted_length) holds, the prefix does
      // not increase the formatted size, e.g. ("%#3x", 0xf) prints "0xf".
1006
1007 // If the result is zero, o, b, x, X adds nothing.
1008 break;
1009 // For a, A, e, E, f, F, g, and G conversions,
1010 // the result of converting a floating-point number always contains a
1011 // decimal-point
1012 case analyze_format_string::ConversionSpecifier::aArg:
1013 case analyze_format_string::ConversionSpecifier::AArg:
1014 case analyze_format_string::ConversionSpecifier::eArg:
1015 case analyze_format_string::ConversionSpecifier::EArg:
1016 case analyze_format_string::ConversionSpecifier::fArg:
1017 case analyze_format_string::ConversionSpecifier::FArg:
1018 case analyze_format_string::ConversionSpecifier::gArg:
1019 case analyze_format_string::ConversionSpecifier::GArg:
1020 Size += (Precision ? 0 : 1);
1021 break;
1022 // For other conversions, the behavior is undefined.
1023 default:
1024 break;
1025 }
1026 }
1027 assert(SpecifierLen <= Size && "no underflow");
1028 Size -= SpecifierLen;
1029 return true;
1030 }
1031
1032 size_t getSizeLowerBound() const { return Size; }
1033 bool isKernelCompatible() const { return IsKernelCompatible; }
1034
1035private:
1036 static size_t computeFieldWidth(const analyze_printf::PrintfSpecifier &FS) {
1037 const analyze_format_string::OptionalAmount &FW = FS.getFieldWidth();
1038 size_t FieldWidth = 0;
1039 if (FW.getHowSpecified() == analyze_format_string::OptionalAmount::Constant)
1040 FieldWidth = FW.getConstantAmount();
1041 return FieldWidth;
1042 }
1043
1044 static size_t computePrecision(const analyze_printf::PrintfSpecifier &FS) {
1045 const analyze_format_string::OptionalAmount &FW = FS.getPrecision();
1046 size_t Precision = 0;
1047
1048 // See man 3 printf for default precision value based on the specifier.
1049 switch (FW.getHowSpecified()) {
1050 case analyze_format_string::OptionalAmount::NotSpecified:
1051 switch (FS.getConversionSpecifier().getKind()) {
1052 default:
1053 break;
1054 case analyze_format_string::ConversionSpecifier::dArg: // %d
1055 case analyze_format_string::ConversionSpecifier::DArg: // %D
1056 case analyze_format_string::ConversionSpecifier::iArg: // %i
1057 Precision = 1;
1058 break;
      case analyze_format_string::ConversionSpecifier::oArg: // %o
      case analyze_format_string::ConversionSpecifier::OArg: // %O
      case analyze_format_string::ConversionSpecifier::uArg: // %u
      case analyze_format_string::ConversionSpecifier::UArg: // %U
      case analyze_format_string::ConversionSpecifier::xArg: // %x
      case analyze_format_string::ConversionSpecifier::XArg: // %X
1065 Precision = 1;
1066 break;
1067 case analyze_format_string::ConversionSpecifier::fArg: // %f
1068 case analyze_format_string::ConversionSpecifier::FArg: // %F
1069 case analyze_format_string::ConversionSpecifier::eArg: // %e
1070 case analyze_format_string::ConversionSpecifier::EArg: // %E
1071 case analyze_format_string::ConversionSpecifier::gArg: // %g
1072 case analyze_format_string::ConversionSpecifier::GArg: // %G
1073 Precision = 6;
1074 break;
      case analyze_format_string::ConversionSpecifier::pArg: // %p
1076 Precision = 1;
1077 break;
1078 }
1079 break;
1080 case analyze_format_string::OptionalAmount::Constant:
1081 Precision = FW.getConstantAmount();
1082 break;
1083 default:
1084 break;
1085 }
1086 return Precision;
1087 }
1088};
1089
1090} // namespace
1091
1092static bool ProcessFormatStringLiteral(const Expr *FormatExpr,
1093 StringRef &FormatStrRef, size_t &StrLen,
1094 ASTContext &Context) {
1095 if (const auto *Format = dyn_cast<StringLiteral>(Val: FormatExpr);
1096 Format && (Format->isOrdinary() || Format->isUTF8())) {
1097 FormatStrRef = Format->getString();
1098 const ConstantArrayType *T =
1099 Context.getAsConstantArrayType(T: Format->getType());
1100 assert(T && "String literal not of constant array type!");
1101 size_t TypeSize = T->getSize().getZExtValue();
1102 // In case there's a null byte somewhere.
1103 StrLen = std::min(a: std::max(a: TypeSize, b: size_t(1)) - 1, b: FormatStrRef.find(C: 0));
1104 return true;
1105 }
1106 return false;
1107}
1108
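// Illustrative (hypothetical) code that the fortify checks below diagnose:
//   char Buf[4];
//   sprintf(Buf, "%5d", I);        // formatted size (at least 6) exceeds Buf
//   memcpy(Buf, Src, 16);          // size argument larger than the destination
//   snprintf(Buf, 16, "%s", Src);  // specified size exceeds sizeof(Buf)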
1109void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
1110 CallExpr *TheCall) {
1111 if (TheCall->isValueDependent() || TheCall->isTypeDependent() ||
1112 isConstantEvaluatedContext())
1113 return;
1114
1115 bool UseDABAttr = false;
1116 const FunctionDecl *UseDecl = FD;
1117
1118 const auto *DABAttr = FD->getAttr<DiagnoseAsBuiltinAttr>();
1119 if (DABAttr) {
1120 UseDecl = DABAttr->getFunction();
1121 assert(UseDecl && "Missing FunctionDecl in DiagnoseAsBuiltin attribute!");
1122 UseDABAttr = true;
1123 }
1124
1125 unsigned BuiltinID = UseDecl->getBuiltinID(/*ConsiderWrappers=*/ConsiderWrapperFunctions: true);
1126
1127 if (!BuiltinID)
1128 return;
1129
1130 const TargetInfo &TI = getASTContext().getTargetInfo();
1131 unsigned SizeTypeWidth = TI.getTypeWidth(T: TI.getSizeType());
1132
1133 auto TranslateIndex = [&](unsigned Index) -> std::optional<unsigned> {
1134 // If we refer to a diagnose_as_builtin attribute, we need to change the
1135 // argument index to refer to the arguments of the called function. Unless
1136 // the index is out of bounds, which presumably means it's a variadic
1137 // function.
1138 if (!UseDABAttr)
1139 return Index;
1140 unsigned DABIndices = DABAttr->argIndices_size();
1141 unsigned NewIndex = Index < DABIndices
1142 ? DABAttr->argIndices_begin()[Index]
1143 : Index - DABIndices + FD->getNumParams();
1144 if (NewIndex >= TheCall->getNumArgs())
1145 return std::nullopt;
1146 return NewIndex;
1147 };
1148
1149 auto ComputeExplicitObjectSizeArgument =
1150 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1151 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1152 if (!IndexOptional)
1153 return std::nullopt;
1154 unsigned NewIndex = *IndexOptional;
1155 Expr::EvalResult Result;
1156 Expr *SizeArg = TheCall->getArg(Arg: NewIndex);
1157 if (!SizeArg->EvaluateAsInt(Result, Ctx: getASTContext()))
1158 return std::nullopt;
1159 llvm::APSInt Integer = Result.Val.getInt();
1160 Integer.setIsUnsigned(true);
1161 return Integer;
1162 };
1163
1164 auto ComputeSizeArgument =
1165 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1166 // If the parameter has a pass_object_size attribute, then we should use its
1167 // (potentially) more strict checking mode. Otherwise, conservatively assume
1168 // type 0.
1169 int BOSType = 0;
1170 // This check can fail for variadic functions.
1171 if (Index < FD->getNumParams()) {
1172 if (const auto *POS =
1173 FD->getParamDecl(Index)->getAttr<PassObjectSizeAttr>())
1174 BOSType = POS->getType();
1175 }
1176
1177 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1178 if (!IndexOptional)
1179 return std::nullopt;
1180 unsigned NewIndex = *IndexOptional;
1181
1182 if (NewIndex >= TheCall->getNumArgs())
1183 return std::nullopt;
1184
1185 const Expr *ObjArg = TheCall->getArg(Arg: NewIndex);
1186 uint64_t Result;
1187 if (!ObjArg->tryEvaluateObjectSize(Result, Ctx&: getASTContext(), Type: BOSType))
1188 return std::nullopt;
1189
1190 // Get the object size in the target's size_t width.
1191 return llvm::APSInt::getUnsigned(X: Result).extOrTrunc(width: SizeTypeWidth);
1192 };
1193
1194 auto ComputeStrLenArgument =
1195 [&](unsigned Index) -> std::optional<llvm::APSInt> {
1196 std::optional<unsigned> IndexOptional = TranslateIndex(Index);
1197 if (!IndexOptional)
1198 return std::nullopt;
1199 unsigned NewIndex = *IndexOptional;
1200
1201 const Expr *ObjArg = TheCall->getArg(Arg: NewIndex);
1202 uint64_t Result;
1203 if (!ObjArg->tryEvaluateStrLen(Result, Ctx&: getASTContext()))
1204 return std::nullopt;
1205 // Add 1 for null byte.
1206 return llvm::APSInt::getUnsigned(X: Result + 1).extOrTrunc(width: SizeTypeWidth);
1207 };
1208
1209 std::optional<llvm::APSInt> SourceSize;
1210 std::optional<llvm::APSInt> DestinationSize;
1211 unsigned DiagID = 0;
1212 bool IsChkVariant = false;
1213
1214 auto GetFunctionName = [&]() {
1215 StringRef FunctionName = getASTContext().BuiltinInfo.getName(ID: BuiltinID);
1216 // Skim off the details of whichever builtin was called to produce a better
1217 // diagnostic, as it's unlikely that the user wrote the __builtin
1218 // explicitly.
1219 if (IsChkVariant) {
1220 FunctionName = FunctionName.drop_front(N: std::strlen(s: "__builtin___"));
1221 FunctionName = FunctionName.drop_back(N: std::strlen(s: "_chk"));
1222 } else {
1223 FunctionName.consume_front(Prefix: "__builtin_");
1224 }
1225 return FunctionName;
1226 };
1227
1228 switch (BuiltinID) {
1229 default:
1230 return;
1231 case Builtin::BI__builtin_strcpy:
1232 case Builtin::BIstrcpy: {
1233 DiagID = diag::warn_fortify_strlen_overflow;
1234 SourceSize = ComputeStrLenArgument(1);
1235 DestinationSize = ComputeSizeArgument(0);
1236 break;
1237 }
1238
1239 case Builtin::BI__builtin___strcpy_chk: {
1240 DiagID = diag::warn_fortify_strlen_overflow;
1241 SourceSize = ComputeStrLenArgument(1);
1242 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1243 IsChkVariant = true;
1244 break;
1245 }
1246
1247 case Builtin::BIscanf:
1248 case Builtin::BIfscanf:
1249 case Builtin::BIsscanf: {
1250 unsigned FormatIndex = 1;
1251 unsigned DataIndex = 2;
1252 if (BuiltinID == Builtin::BIscanf) {
1253 FormatIndex = 0;
1254 DataIndex = 1;
1255 }
1256
1257 const auto *FormatExpr =
1258 TheCall->getArg(Arg: FormatIndex)->IgnoreParenImpCasts();
1259
1260 StringRef FormatStrRef;
1261 size_t StrLen;
1262 if (!ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context))
1263 return;
1264
1265 auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
1266 unsigned SourceSize) {
1267 DiagID = diag::warn_fortify_scanf_overflow;
1268 unsigned Index = ArgIndex + DataIndex;
1269 StringRef FunctionName = GetFunctionName();
1270 DiagRuntimeBehavior(TheCall->getArg(Arg: Index)->getBeginLoc(), TheCall,
1271 PDiag(DiagID) << FunctionName << (Index + 1)
1272 << DestSize << SourceSize);
1273 };
1274
1275 auto ShiftedComputeSizeArgument = [&](unsigned Index) {
1276 return ComputeSizeArgument(Index + DataIndex);
1277 };
1278 ScanfDiagnosticFormatHandler H(ShiftedComputeSizeArgument, Diagnose);
1279 const char *FormatBytes = FormatStrRef.data();
1280 analyze_format_string::ParseScanfString(H, beg: FormatBytes,
1281 end: FormatBytes + StrLen, LO: getLangOpts(),
1282 Target: Context.getTargetInfo());
1283
1284 // Unlike the other cases, in this one we have already issued the diagnostic
1285 // here, so no need to continue (because unlike the other cases, here the
1286 // diagnostic refers to the argument number).
1287 return;
1288 }
1289
1290 case Builtin::BIsprintf:
1291 case Builtin::BI__builtin___sprintf_chk: {
1292 size_t FormatIndex = BuiltinID == Builtin::BIsprintf ? 1 : 3;
1293 auto *FormatExpr = TheCall->getArg(Arg: FormatIndex)->IgnoreParenImpCasts();
1294
1295 StringRef FormatStrRef;
1296 size_t StrLen;
1297 if (ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1298 EstimateSizeFormatHandler H(FormatStrRef);
1299 const char *FormatBytes = FormatStrRef.data();
1300 if (!analyze_format_string::ParsePrintfString(
1301 H, beg: FormatBytes, end: FormatBytes + StrLen, LO: getLangOpts(),
1302 Target: Context.getTargetInfo(), isFreeBSDKPrintf: false)) {
1303 DiagID = H.isKernelCompatible()
1304 ? diag::warn_format_overflow
1305 : diag::warn_format_overflow_non_kprintf;
1306 SourceSize = llvm::APSInt::getUnsigned(X: H.getSizeLowerBound())
1307 .extOrTrunc(width: SizeTypeWidth);
1308 if (BuiltinID == Builtin::BI__builtin___sprintf_chk) {
1309 DestinationSize = ComputeExplicitObjectSizeArgument(2);
1310 IsChkVariant = true;
1311 } else {
1312 DestinationSize = ComputeSizeArgument(0);
1313 }
1314 break;
1315 }
1316 }
1317 return;
1318 }
1319 case Builtin::BI__builtin___memcpy_chk:
1320 case Builtin::BI__builtin___memmove_chk:
1321 case Builtin::BI__builtin___memset_chk:
1322 case Builtin::BI__builtin___strlcat_chk:
1323 case Builtin::BI__builtin___strlcpy_chk:
1324 case Builtin::BI__builtin___strncat_chk:
1325 case Builtin::BI__builtin___strncpy_chk:
1326 case Builtin::BI__builtin___stpncpy_chk:
1327 case Builtin::BI__builtin___memccpy_chk:
1328 case Builtin::BI__builtin___mempcpy_chk: {
1329 DiagID = diag::warn_builtin_chk_overflow;
1330 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 2);
1331 DestinationSize =
1332 ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1333 IsChkVariant = true;
1334 break;
1335 }
1336
1337 case Builtin::BI__builtin___snprintf_chk:
1338 case Builtin::BI__builtin___vsnprintf_chk: {
1339 DiagID = diag::warn_builtin_chk_overflow;
1340 SourceSize = ComputeExplicitObjectSizeArgument(1);
1341 DestinationSize = ComputeExplicitObjectSizeArgument(3);
1342 IsChkVariant = true;
1343 break;
1344 }
1345
1346 case Builtin::BIstrncat:
1347 case Builtin::BI__builtin_strncat:
1348 case Builtin::BIstrncpy:
1349 case Builtin::BI__builtin_strncpy:
1350 case Builtin::BIstpncpy:
1351 case Builtin::BI__builtin_stpncpy: {
1352 // Whether these functions overflow depends on the runtime strlen of the
1353 // string, not just the buffer size, so emitting the "always overflow"
1354 // diagnostic isn't quite right. We should still diagnose passing a buffer
1355 // size larger than the destination buffer though; this is a runtime abort
1356 // in _FORTIFY_SOURCE mode, and is quite suspicious otherwise.
1357 DiagID = diag::warn_fortify_source_size_mismatch;
1358 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1359 DestinationSize = ComputeSizeArgument(0);
1360 break;
1361 }
1362
1363 case Builtin::BImemcpy:
1364 case Builtin::BI__builtin_memcpy:
1365 case Builtin::BImemmove:
1366 case Builtin::BI__builtin_memmove:
1367 case Builtin::BImemset:
1368 case Builtin::BI__builtin_memset:
1369 case Builtin::BImempcpy:
1370 case Builtin::BI__builtin_mempcpy: {
1371 DiagID = diag::warn_fortify_source_overflow;
1372 SourceSize = ComputeExplicitObjectSizeArgument(TheCall->getNumArgs() - 1);
1373 DestinationSize = ComputeSizeArgument(0);
1374 break;
1375 }
1376 case Builtin::BIsnprintf:
1377 case Builtin::BI__builtin_snprintf:
1378 case Builtin::BIvsnprintf:
1379 case Builtin::BI__builtin_vsnprintf: {
1380 DiagID = diag::warn_fortify_source_size_mismatch;
1381 SourceSize = ComputeExplicitObjectSizeArgument(1);
1382 const auto *FormatExpr = TheCall->getArg(Arg: 2)->IgnoreParenImpCasts();
1383 StringRef FormatStrRef;
1384 size_t StrLen;
1385 if (SourceSize &&
1386 ProcessFormatStringLiteral(FormatExpr, FormatStrRef, StrLen, Context)) {
1387 EstimateSizeFormatHandler H(FormatStrRef);
1388 const char *FormatBytes = FormatStrRef.data();
1389 if (!analyze_format_string::ParsePrintfString(
1390 H, beg: FormatBytes, end: FormatBytes + StrLen, LO: getLangOpts(),
1391 Target: Context.getTargetInfo(), /*isFreeBSDKPrintf=*/false)) {
1392 llvm::APSInt FormatSize =
1393 llvm::APSInt::getUnsigned(X: H.getSizeLowerBound())
1394 .extOrTrunc(width: SizeTypeWidth);
1395 if (FormatSize > *SourceSize && *SourceSize != 0) {
1396 unsigned TruncationDiagID =
1397 H.isKernelCompatible() ? diag::warn_format_truncation
1398 : diag::warn_format_truncation_non_kprintf;
1399 SmallString<16> SpecifiedSizeStr;
1400 SmallString<16> FormatSizeStr;
1401 SourceSize->toString(Str&: SpecifiedSizeStr, /*Radix=*/10);
1402 FormatSize.toString(Str&: FormatSizeStr, /*Radix=*/10);
1403 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1404 PDiag(DiagID: TruncationDiagID)
1405 << GetFunctionName() << SpecifiedSizeStr
1406 << FormatSizeStr);
1407 }
1408 }
1409 }
1410 DestinationSize = ComputeSizeArgument(0);
1411 }
1412 }
1413
1414 if (!SourceSize || !DestinationSize ||
1415 llvm::APSInt::compareValues(I1: *SourceSize, I2: *DestinationSize) <= 0)
1416 return;
1417
1418 StringRef FunctionName = GetFunctionName();
1419
1420 SmallString<16> DestinationStr;
1421 SmallString<16> SourceStr;
1422 DestinationSize->toString(Str&: DestinationStr, /*Radix=*/10);
1423 SourceSize->toString(Str&: SourceStr, /*Radix=*/10);
1424 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
1425 PDiag(DiagID)
1426 << FunctionName << DestinationStr << SourceStr);
1427}
1428
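/// The SEH builtins validated through this helper (such as _exception_code())
/// are only meaningful inside the corresponding __try/__except or
/// __try/__finally construct; an illustrative misuse that gets diagnosed is
/// calling _exception_code() outside of any __except block or filter.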
1429static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
1430 Scope::ScopeFlags NeededScopeFlags,
1431 unsigned DiagID) {
1432 // Scopes aren't available during instantiation. Fortunately, builtin
1433 // functions cannot be template args so they cannot be formed through template
1434 // instantiation. Therefore checking once during the parse is sufficient.
1435 if (SemaRef.inTemplateInstantiation())
1436 return false;
1437
1438 Scope *S = SemaRef.getCurScope();
1439 while (S && !S->isSEHExceptScope())
1440 S = S->getParent();
1441 if (!S || !(S->getFlags() & NeededScopeFlags)) {
1442 auto *DRE = cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
1443 SemaRef.Diag(TheCall->getExprLoc(), DiagID)
1444 << DRE->getDecl()->getIdentifier();
1445 return true;
1446 }
1447
1448 return false;
1449}
1450
1451static inline bool isBlockPointer(Expr *Arg) {
1452 return Arg->getType()->isBlockPointerType();
1453}
1454
1455/// OpenCL C v2.0, s6.13.17.2 - Checks that the block parameters are all local
1456/// void*, which is a requirement of device side enqueue.
1457static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
1458 const BlockPointerType *BPT =
1459 cast<BlockPointerType>(Val: BlockArg->getType().getCanonicalType());
1460 ArrayRef<QualType> Params =
1461 BPT->getPointeeType()->castAs<FunctionProtoType>()->getParamTypes();
1462 unsigned ArgCounter = 0;
1463 bool IllegalParams = false;
1464 // Iterate through the block parameters until either one is found that is not
1465 // a local void*, or the block is valid.
1466 for (ArrayRef<QualType>::iterator I = Params.begin(), E = Params.end();
1467 I != E; ++I, ++ArgCounter) {
1468 if (!(*I)->isPointerType() || !(*I)->getPointeeType()->isVoidType() ||
1469 (*I)->getPointeeType().getQualifiers().getAddressSpace() !=
1470 LangAS::opencl_local) {
1471 // Get the location of the error. If a block literal has been passed
1472 // (BlockExpr) then we can point straight to the offending argument,
1473 // else we just point to the variable reference.
1474 SourceLocation ErrorLoc;
1475 if (isa<BlockExpr>(Val: BlockArg)) {
1476 BlockDecl *BD = cast<BlockExpr>(Val: BlockArg)->getBlockDecl();
1477 ErrorLoc = BD->getParamDecl(i: ArgCounter)->getBeginLoc();
1478 } else if (isa<DeclRefExpr>(Val: BlockArg)) {
1479 ErrorLoc = cast<DeclRefExpr>(Val: BlockArg)->getBeginLoc();
1480 }
1481 S.Diag(ErrorLoc,
1482 diag::err_opencl_enqueue_kernel_blocks_non_local_void_args);
1483 IllegalParams = true;
1484 }
1485 }
1486
1487 return IllegalParams;
1488}
1489
1490static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
  // An OpenCL device can support the extension but not the feature, because the
  // extension requires subgroup independent forward progress, which is optional
  // in the OpenCL C 3.0 __opencl_c_subgroups feature.
1494 if (!S.getOpenCLOptions().isSupported(Ext: "cl_khr_subgroups", LO: S.getLangOpts()) &&
1495 !S.getOpenCLOptions().isSupported(Ext: "__opencl_c_subgroups",
1496 LO: S.getLangOpts())) {
1497 S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
1498 << 1 << Call->getDirectCallee()
1499 << "cl_khr_subgroups or __opencl_c_subgroups";
1500 return true;
1501 }
1502 return false;
1503}
1504
1505static bool SemaOpenCLBuiltinNDRangeAndBlock(Sema &S, CallExpr *TheCall) {
1506 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 2))
1507 return true;
1508
1509 if (checkOpenCLSubgroupExt(S, Call: TheCall))
1510 return true;
1511
1512 // First argument is an ndrange_t type.
1513 Expr *NDRangeArg = TheCall->getArg(Arg: 0);
1514 if (NDRangeArg->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1515 S.Diag(NDRangeArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1516 << TheCall->getDirectCallee() << "'ndrange_t'";
1517 return true;
1518 }
1519
1520 Expr *BlockArg = TheCall->getArg(Arg: 1);
1521 if (!isBlockPointer(Arg: BlockArg)) {
1522 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1523 << TheCall->getDirectCallee() << "block";
1524 return true;
1525 }
1526 return checkOpenCLBlockArgs(S, BlockArg);
1527}
1528
1529 /// OpenCL C v2.0, s6.13.17.6 - Check the argument to the
1530 /// get_kernel_work_group_size and
1531 /// get_kernel_preferred_work_group_size_multiple builtin functions.
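/// Illustrative usage (a sketch, not quoted from the OpenCL spec):
///   uint n = get_kernel_work_group_size(^(local void *p){ /* ... */ });
/// The single argument must be a block; its parameters are then validated by
/// checkOpenCLBlockArgs.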
1532static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
1533 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
1534 return true;
1535
1536 Expr *BlockArg = TheCall->getArg(Arg: 0);
1537 if (!isBlockPointer(Arg: BlockArg)) {
1538 S.Diag(BlockArg->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1539 << TheCall->getDirectCallee() << "block";
1540 return true;
1541 }
1542 return checkOpenCLBlockArgs(S, BlockArg);
1543}
1544
1545/// Diagnose integer type and any valid implicit conversion to it.
1546static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
1547 const QualType &IntType);
1548
1549static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
1550 unsigned Start, unsigned End) {
1551 bool IllegalParams = false;
1552 for (unsigned I = Start; I <= End; ++I)
1553 IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(Arg: I),
1554 S.Context.getSizeType());
1555 return IllegalParams;
1556}
1557
1558 /// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
1559 /// 'local void*' parameters of the passed block.
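/// For illustration (hypothetical call), a block with two 'local void*'
/// parameters needs two trailing size arguments:
///   enqueue_kernel(q, flags, ndr, ^(local void *a, local void *b){ /* ... */ },
///                  64u, 128u);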
1560static bool checkOpenCLEnqueueVariadicArgs(Sema &S, CallExpr *TheCall,
1561 Expr *BlockArg,
1562 unsigned NumNonVarArgs) {
1563 const BlockPointerType *BPT =
1564 cast<BlockPointerType>(Val: BlockArg->getType().getCanonicalType());
1565 unsigned NumBlockParams =
1566 BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams();
1567 unsigned TotalNumArgs = TheCall->getNumArgs();
1568
1569 // For each argument passed to the block, a corresponding uint needs to
1570 // be passed to describe the size of the local memory.
1571 if (TotalNumArgs != NumBlockParams + NumNonVarArgs) {
1572 S.Diag(TheCall->getBeginLoc(),
1573 diag::err_opencl_enqueue_kernel_local_size_args);
1574 return true;
1575 }
1576
1577 // Check that the sizes of the local memory are specified by integers.
1578 return checkOpenCLEnqueueLocalSizeArgs(S, TheCall, Start: NumNonVarArgs,
1579 End: TotalNumArgs - 1);
1580}
1581
1582/// OpenCL C v2.0, s6.13.17 - Enqueue kernel function contains four different
1583/// overload formats specified in Table 6.13.17.1.
1584/// int enqueue_kernel(queue_t queue,
1585/// kernel_enqueue_flags_t flags,
1586/// const ndrange_t ndrange,
1587/// void (^block)(void))
1588/// int enqueue_kernel(queue_t queue,
1589/// kernel_enqueue_flags_t flags,
1590/// const ndrange_t ndrange,
1591/// uint num_events_in_wait_list,
1592/// clk_event_t *event_wait_list,
1593/// clk_event_t *event_ret,
1594/// void (^block)(void))
1595/// int enqueue_kernel(queue_t queue,
1596/// kernel_enqueue_flags_t flags,
1597/// const ndrange_t ndrange,
1598/// void (^block)(local void*, ...),
1599/// uint size0, ...)
1600/// int enqueue_kernel(queue_t queue,
1601/// kernel_enqueue_flags_t flags,
1602/// const ndrange_t ndrange,
1603/// uint num_events_in_wait_list,
1604/// clk_event_t *event_wait_list,
1605/// clk_event_t *event_ret,
1606/// void (^block)(local void*, ...),
1607/// uint size0, ...)
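///
/// As a sketch (assuming a queue 'q' and an ndrange 'ndr' are in scope; the
/// names are illustrative only), the simplest form is:
///   enqueue_kernel(q, CLK_ENQUEUE_FLAGS_NO_WAIT, ndr, ^(void){ /* ... */ });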
1608static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
1609 unsigned NumArgs = TheCall->getNumArgs();
1610
1611 if (NumArgs < 4) {
1612 S.Diag(TheCall->getBeginLoc(),
1613 diag::err_typecheck_call_too_few_args_at_least)
1614 << 0 << 4 << NumArgs << /*is non object*/ 0;
1615 return true;
1616 }
1617
1618 Expr *Arg0 = TheCall->getArg(Arg: 0);
1619 Expr *Arg1 = TheCall->getArg(Arg: 1);
1620 Expr *Arg2 = TheCall->getArg(Arg: 2);
1621 Expr *Arg3 = TheCall->getArg(Arg: 3);
1622
1623 // First argument always needs to be a queue_t type.
1624 if (!Arg0->getType()->isQueueT()) {
1625 S.Diag(TheCall->getArg(0)->getBeginLoc(),
1626 diag::err_opencl_builtin_expected_type)
1627 << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
1628 return true;
1629 }
1630
1631 // Second argument always needs to be a kernel_enqueue_flags_t enum value.
1632 if (!Arg1->getType()->isIntegerType()) {
1633 S.Diag(TheCall->getArg(1)->getBeginLoc(),
1634 diag::err_opencl_builtin_expected_type)
1635 << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
1636 return true;
1637 }
1638
1639 // Third argument is always an ndrange_t type.
1640 if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
1641 S.Diag(TheCall->getArg(2)->getBeginLoc(),
1642 diag::err_opencl_builtin_expected_type)
1643 << TheCall->getDirectCallee() << "'ndrange_t'";
1644 return true;
1645 }
1646
1647 // With four arguments, there is only one form that the function could be
1648 // called in: no events and no variable arguments.
1649 if (NumArgs == 4) {
1650 // check that the last argument is the right block type.
1651 if (!isBlockPointer(Arg: Arg3)) {
1652 S.Diag(Arg3->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1653 << TheCall->getDirectCallee() << "block";
1654 return true;
1655 }
1656 // we have a block type, check the prototype
1657 const BlockPointerType *BPT =
1658 cast<BlockPointerType>(Val: Arg3->getType().getCanonicalType());
1659 if (BPT->getPointeeType()->castAs<FunctionProtoType>()->getNumParams() > 0) {
1660 S.Diag(Arg3->getBeginLoc(),
1661 diag::err_opencl_enqueue_kernel_blocks_no_args);
1662 return true;
1663 }
1664 return false;
1665 }
1666 // we can have block + varargs.
1667 if (isBlockPointer(Arg: Arg3))
1668 return (checkOpenCLBlockArgs(S, BlockArg: Arg3) ||
1669 checkOpenCLEnqueueVariadicArgs(S, TheCall, BlockArg: Arg3, NumNonVarArgs: 4));
1670 // last two cases with either exactly 7 args or 7 args and varargs.
1671 if (NumArgs >= 7) {
1672 // check common block argument.
1673 Expr *Arg6 = TheCall->getArg(Arg: 6);
1674 if (!isBlockPointer(Arg: Arg6)) {
1675 S.Diag(Arg6->getBeginLoc(), diag::err_opencl_builtin_expected_type)
1676 << TheCall->getDirectCallee() << "block";
1677 return true;
1678 }
1679 if (checkOpenCLBlockArgs(S, BlockArg: Arg6))
1680 return true;
1681
1682     // Fourth argument has to be an integer type.
1683 if (!Arg3->getType()->isIntegerType()) {
1684 S.Diag(TheCall->getArg(3)->getBeginLoc(),
1685 diag::err_opencl_builtin_expected_type)
1686 << TheCall->getDirectCallee() << "integer";
1687 return true;
1688 }
1689 // check remaining common arguments.
1690 Expr *Arg4 = TheCall->getArg(Arg: 4);
1691 Expr *Arg5 = TheCall->getArg(Arg: 5);
1692
1693 // Fifth argument is always passed as a pointer to clk_event_t.
1694 if (!Arg4->isNullPointerConstant(Ctx&: S.Context,
1695 NPC: Expr::NPC_ValueDependentIsNotNull) &&
1696 !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
1697 S.Diag(TheCall->getArg(4)->getBeginLoc(),
1698 diag::err_opencl_builtin_expected_type)
1699 << TheCall->getDirectCallee()
1700 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1701 return true;
1702 }
1703
1704 // Sixth argument is always passed as a pointer to clk_event_t.
1705 if (!Arg5->isNullPointerConstant(Ctx&: S.Context,
1706 NPC: Expr::NPC_ValueDependentIsNotNull) &&
1707 !(Arg5->getType()->isPointerType() &&
1708 Arg5->getType()->getPointeeType()->isClkEventT())) {
1709 S.Diag(TheCall->getArg(5)->getBeginLoc(),
1710 diag::err_opencl_builtin_expected_type)
1711 << TheCall->getDirectCallee()
1712 << S.Context.getPointerType(S.Context.OCLClkEventTy);
1713 return true;
1714 }
1715
1716 if (NumArgs == 7)
1717 return false;
1718
1719 return checkOpenCLEnqueueVariadicArgs(S, TheCall, BlockArg: Arg6, NumNonVarArgs: 7);
1720 }
1721
1722   // None of the specific cases was detected; give a generic error.
1723 S.Diag(TheCall->getBeginLoc(),
1724 diag::err_opencl_enqueue_kernel_incorrect_args);
1725 return true;
1726}
1727
1728/// Returns OpenCL access qual.
1729static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) {
1730 return D->getAttr<OpenCLAccessAttr>();
1731}
1732
1733 /// Returns true if the pipe argument or its access qualifier is invalid for the call.
1734static bool checkOpenCLPipeArg(Sema &S, CallExpr *Call) {
1735 const Expr *Arg0 = Call->getArg(Arg: 0);
1736 // First argument type should always be pipe.
1737 if (!Arg0->getType()->isPipeType()) {
1738 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1739 << Call->getDirectCallee() << Arg0->getSourceRange();
1740 return true;
1741 }
1742 OpenCLAccessAttr *AccessQual =
1743 getOpenCLArgAccess(cast<DeclRefExpr>(Arg0)->getDecl());
1744   // Validate that the access qualifier is compatible with the call.
1745 // OpenCL v2.0 s6.13.16 - The access qualifiers for pipe should only be
1746 // read_only and write_only, and assumed to be read_only if no qualifier is
1747 // specified.
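  // For example (illustrative): 'read_pipe(p, &v)' is rejected below if 'p'
  // was declared 'write_only pipe int p', since the read builtins require a
  // read_only pipe (or an unqualified one, which defaults to read_only).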
1748 switch (Call->getDirectCallee()->getBuiltinID()) {
1749 case Builtin::BIread_pipe:
1750 case Builtin::BIreserve_read_pipe:
1751 case Builtin::BIcommit_read_pipe:
1752 case Builtin::BIwork_group_reserve_read_pipe:
1753 case Builtin::BIsub_group_reserve_read_pipe:
1754 case Builtin::BIwork_group_commit_read_pipe:
1755 case Builtin::BIsub_group_commit_read_pipe:
1756 if (!(!AccessQual || AccessQual->isReadOnly())) {
1757 S.Diag(Arg0->getBeginLoc(),
1758 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1759 << "read_only" << Arg0->getSourceRange();
1760 return true;
1761 }
1762 break;
1763 case Builtin::BIwrite_pipe:
1764 case Builtin::BIreserve_write_pipe:
1765 case Builtin::BIcommit_write_pipe:
1766 case Builtin::BIwork_group_reserve_write_pipe:
1767 case Builtin::BIsub_group_reserve_write_pipe:
1768 case Builtin::BIwork_group_commit_write_pipe:
1769 case Builtin::BIsub_group_commit_write_pipe:
1770 if (!(AccessQual && AccessQual->isWriteOnly())) {
1771 S.Diag(Arg0->getBeginLoc(),
1772 diag::err_opencl_builtin_pipe_invalid_access_modifier)
1773 << "write_only" << Arg0->getSourceRange();
1774 return true;
1775 }
1776 break;
1777 default:
1778 break;
1779 }
1780 return false;
1781}
1782
1783 /// Returns true if the packet argument is not a pointer to the pipe's element type.
1784static bool checkOpenCLPipePacketType(Sema &S, CallExpr *Call, unsigned Idx) {
1785 const Expr *Arg0 = Call->getArg(Arg: 0);
1786 const Expr *ArgIdx = Call->getArg(Arg: Idx);
1787 const PipeType *PipeTy = cast<PipeType>(Val: Arg0->getType());
1788 const QualType EltTy = PipeTy->getElementType();
1789 const PointerType *ArgTy = ArgIdx->getType()->getAs<PointerType>();
1790   // The argument at Idx should be a pointer, and the pointee type should
1791   // match the pipe element type.
1792 if (!ArgTy ||
1793 !S.Context.hasSameType(
1794 T1: EltTy, T2: ArgTy->getPointeeType()->getCanonicalTypeInternal())) {
1795 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1796 << Call->getDirectCallee() << S.Context.getPointerType(EltTy)
1797 << ArgIdx->getType() << ArgIdx->getSourceRange();
1798 return true;
1799 }
1800 return false;
1801}
1802
1803// Performs semantic analysis for the read/write_pipe call.
1804// \param S Reference to the semantic analyzer.
1805// \param Call A pointer to the builtin call.
1806// \return True if a semantic error has been found, false otherwise.
1807static bool SemaBuiltinRWPipe(Sema &S, CallExpr *Call) {
1808 // OpenCL v2.0 s6.13.16.2 - The built-in read/write
1809 // functions have two forms.
1810 switch (Call->getNumArgs()) {
1811 case 2:
1812 if (checkOpenCLPipeArg(S, Call))
1813 return true;
1814 // The call with 2 arguments should be
1815 // read/write_pipe(pipe T, T*).
1816 // Check packet type T.
1817 if (checkOpenCLPipePacketType(S, Call, Idx: 1))
1818 return true;
1819 break;
1820
1821 case 4: {
1822 if (checkOpenCLPipeArg(S, Call))
1823 return true;
1824 // The call with 4 arguments should be
1825 // read/write_pipe(pipe T, reserve_id_t, uint, T*).
1826 // Check reserve_id_t.
1827 if (!Call->getArg(Arg: 1)->getType()->isReserveIDT()) {
1828 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1829 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1830 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1831 return true;
1832 }
1833
1834 // Check the index.
1835 const Expr *Arg2 = Call->getArg(Arg: 2);
1836 if (!Arg2->getType()->isIntegerType() &&
1837 !Arg2->getType()->isUnsignedIntegerType()) {
1838 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1839 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1840 << Arg2->getType() << Arg2->getSourceRange();
1841 return true;
1842 }
1843
1844 // Check packet type T.
1845 if (checkOpenCLPipePacketType(S, Call, Idx: 3))
1846 return true;
1847 } break;
1848 default:
1849 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_arg_num)
1850 << Call->getDirectCallee() << Call->getSourceRange();
1851 return true;
1852 }
1853
1854 return false;
1855}
1856
1857 // Performs semantic analysis on the [work_group_|sub_group_]reserve_read_pipe
1858 // and [work_group_|sub_group_]reserve_write_pipe builtins.
1859// \param S Reference to the semantic analyzer.
1860// \param Call The call to the builtin function to be analyzed.
1861// \return True if a semantic error was found, false otherwise.
1862static bool SemaBuiltinReserveRWPipe(Sema &S, CallExpr *Call) {
1863 if (checkArgCount(S, Call, DesiredArgCount: 2))
1864 return true;
1865
1866 if (checkOpenCLPipeArg(S, Call))
1867 return true;
1868
1869 // Check the reserve size.
1870 if (!Call->getArg(Arg: 1)->getType()->isIntegerType() &&
1871 !Call->getArg(Arg: 1)->getType()->isUnsignedIntegerType()) {
1872 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1873 << Call->getDirectCallee() << S.Context.UnsignedIntTy
1874 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1875 return true;
1876 }
1877
1878   // Since the return type of the reserve_read/write_pipe built-ins is
1879   // reserve_id_t, which is not defined in the builtin def file, int was used
1880   // as a placeholder return type and needs to be overridden here.
1881 Call->setType(S.Context.OCLReserveIDTy);
1882
1883 return false;
1884}
1885
1886 // Performs semantic analysis on the [work_group_|sub_group_]commit_read_pipe
1887 // and [work_group_|sub_group_]commit_write_pipe builtins.
1888// \param S Reference to the semantic analyzer.
1889// \param Call The call to the builtin function to be analyzed.
1890// \return True if a semantic error was found, false otherwise.
1891static bool SemaBuiltinCommitRWPipe(Sema &S, CallExpr *Call) {
1892 if (checkArgCount(S, Call, DesiredArgCount: 2))
1893 return true;
1894
1895 if (checkOpenCLPipeArg(S, Call))
1896 return true;
1897
1898 // Check reserve_id_t.
1899 if (!Call->getArg(Arg: 1)->getType()->isReserveIDT()) {
1900 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_invalid_arg)
1901 << Call->getDirectCallee() << S.Context.OCLReserveIDTy
1902 << Call->getArg(1)->getType() << Call->getArg(1)->getSourceRange();
1903 return true;
1904 }
1905
1906 return false;
1907}
1908
1909 // Performs semantic analysis on calls to the built-in pipe query functions
1910 // get_pipe_num_packets and get_pipe_max_packets.
1911// \param S Reference to the semantic analyzer.
1912// \param Call The call to the builtin function to be analyzed.
1913// \return True if a semantic error was found, false otherwise.
1914static bool SemaBuiltinPipePackets(Sema &S, CallExpr *Call) {
1915 if (checkArgCount(S, Call, DesiredArgCount: 1))
1916 return true;
1917
1918 if (!Call->getArg(Arg: 0)->getType()->isPipeType()) {
1919 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_pipe_first_arg)
1920 << Call->getDirectCallee() << Call->getArg(0)->getSourceRange();
1921 return true;
1922 }
1923
1924 return false;
1925}
1926
1927// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
1928// Performs semantic analysis for the to_global/local/private call.
1929// \param S Reference to the semantic analyzer.
1930// \param BuiltinID ID of the builtin function.
1931// \param Call A pointer to the builtin call.
1932// \return True if a semantic error has been found, false otherwise.
1933static bool SemaOpenCLBuiltinToAddr(Sema &S, unsigned BuiltinID,
1934 CallExpr *Call) {
1935 if (checkArgCount(S, Call, DesiredArgCount: 1))
1936 return true;
1937
1938 auto RT = Call->getArg(Arg: 0)->getType();
1939   if (!RT->isPointerType() ||
1940       RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) {
1941 S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg)
1942 << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange();
1943 return true;
1944 }
1945
1946 if (RT->getPointeeType().getAddressSpace() != LangAS::opencl_generic) {
1947 S.Diag(Call->getArg(0)->getBeginLoc(),
1948 diag::warn_opencl_generic_address_space_arg)
1949 << Call->getDirectCallee()->getNameInfo().getAsString()
1950 << Call->getArg(0)->getSourceRange();
1951 }
1952
1953 RT = RT->getPointeeType();
1954 auto Qual = RT.getQualifiers();
1955 switch (BuiltinID) {
1956 case Builtin::BIto_global:
1957 Qual.setAddressSpace(LangAS::opencl_global);
1958 break;
1959 case Builtin::BIto_local:
1960 Qual.setAddressSpace(LangAS::opencl_local);
1961 break;
1962 case Builtin::BIto_private:
1963 Qual.setAddressSpace(LangAS::opencl_private);
1964 break;
1965 default:
1966 llvm_unreachable("Invalid builtin function");
1967 }
1968 Call->setType(S.Context.getPointerType(T: S.Context.getQualifiedType(
1969 T: RT.getUnqualifiedType(), Qs: Qual)));
1970
1971 return false;
1972}
1973
1974static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
1975 if (checkArgCount(S, Call: TheCall, DesiredArgCount: 1))
1976 return ExprError();
1977
1978 // Compute __builtin_launder's parameter type from the argument.
1979 // The parameter type is:
1980 // * The type of the argument if it's not an array or function type,
1981 // Otherwise,
1982 // * The decayed argument type.
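  // For example (hypothetical arguments): an argument of array type 'int[4]'
  // yields a parameter type of 'int *', a function-typed argument yields a
  // pointer to that function type, and a plain 'Foo *' argument is used as is.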
1983 QualType ParamTy = [&]() {
1984 QualType ArgTy = TheCall->getArg(Arg: 0)->getType();
1985 if (const ArrayType *Ty = ArgTy->getAsArrayTypeUnsafe())
1986 return S.Context.getPointerType(Ty->getElementType());
1987 if (ArgTy->isFunctionType()) {
1988 return S.Context.getPointerType(ArgTy);
1989 }
1990 return ArgTy;
1991 }();
1992
1993 TheCall->setType(ParamTy);
1994
1995 auto DiagSelect = [&]() -> std::optional<unsigned> {
1996 if (!ParamTy->isPointerType())
1997 return 0;
1998 if (ParamTy->isFunctionPointerType())
1999 return 1;
2000 if (ParamTy->isVoidPointerType())
2001 return 2;
2002 return std::optional<unsigned>{};
2003 }();
2004 if (DiagSelect) {
2005 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
2006 << *DiagSelect << TheCall->getSourceRange();
2007 return ExprError();
2008 }
2009
2010 // We either have an incomplete class type, or we have a class template
2011 // whose instantiation has not been forced. Example:
2012 //
2013 // template <class T> struct Foo { T value; };
2014 // Foo<int> *p = nullptr;
2015 // auto *d = __builtin_launder(p);
2016 if (S.RequireCompleteType(TheCall->getBeginLoc(), ParamTy->getPointeeType(),
2017 diag::err_incomplete_type))
2018 return ExprError();
2019
2020 assert(ParamTy->getPointeeType()->isObjectType() &&
2021 "Unhandled non-object pointer case");
2022
2023 InitializedEntity Entity =
2024 InitializedEntity::InitializeParameter(Context&: S.Context, Type: ParamTy, Consumed: false);
2025 ExprResult Arg =
2026 S.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: TheCall->getArg(Arg: 0));
2027 if (Arg.isInvalid())
2028 return ExprError();
2029 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
2030
2031 return TheCall;
2032}
2033
2034// Emit an error and return true if the current object format type is in the
2035// list of unsupported types.
2036static bool CheckBuiltinTargetNotInUnsupported(
2037 Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2038 ArrayRef<llvm::Triple::ObjectFormatType> UnsupportedObjectFormatTypes) {
2039 llvm::Triple::ObjectFormatType CurObjFormat =
2040 S.getASTContext().getTargetInfo().getTriple().getObjectFormat();
2041 if (llvm::is_contained(Range&: UnsupportedObjectFormatTypes, Element: CurObjFormat)) {
2042 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2043 << TheCall->getSourceRange();
2044 return true;
2045 }
2046 return false;
2047}
2048
2049// Emit an error and return true if the current architecture is not in the list
2050// of supported architectures.
2051static bool
2052CheckBuiltinTargetInSupported(Sema &S, unsigned BuiltinID, CallExpr *TheCall,
2053 ArrayRef<llvm::Triple::ArchType> SupportedArchs) {
2054 llvm::Triple::ArchType CurArch =
2055 S.getASTContext().getTargetInfo().getTriple().getArch();
2056 if (llvm::is_contained(Range&: SupportedArchs, Element: CurArch))
2057 return false;
2058 S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2059 << TheCall->getSourceRange();
2060 return true;
2061}
2062
2063static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr,
2064 SourceLocation CallSiteLoc);
2065
2066bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
2067 CallExpr *TheCall) {
2068 switch (TI.getTriple().getArch()) {
2069 default:
2070 // Some builtins don't require additional checking, so just consider these
2071 // acceptable.
2072 return false;
2073 case llvm::Triple::arm:
2074 case llvm::Triple::armeb:
2075 case llvm::Triple::thumb:
2076 case llvm::Triple::thumbeb:
2077 return CheckARMBuiltinFunctionCall(TI, BuiltinID, TheCall);
2078 case llvm::Triple::aarch64:
2079 case llvm::Triple::aarch64_32:
2080 case llvm::Triple::aarch64_be:
2081 return CheckAArch64BuiltinFunctionCall(TI, BuiltinID, TheCall);
2082 case llvm::Triple::bpfeb:
2083 case llvm::Triple::bpfel:
2084 return CheckBPFBuiltinFunctionCall(BuiltinID, TheCall);
2085 case llvm::Triple::hexagon:
2086 return CheckHexagonBuiltinFunctionCall(BuiltinID, TheCall);
2087 case llvm::Triple::mips:
2088 case llvm::Triple::mipsel:
2089 case llvm::Triple::mips64:
2090 case llvm::Triple::mips64el:
2091 return CheckMipsBuiltinFunctionCall(TI, BuiltinID, TheCall);
2092 case llvm::Triple::systemz:
2093 return CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall);
2094 case llvm::Triple::x86:
2095 case llvm::Triple::x86_64:
2096 return CheckX86BuiltinFunctionCall(TI, BuiltinID, TheCall);
2097 case llvm::Triple::ppc:
2098 case llvm::Triple::ppcle:
2099 case llvm::Triple::ppc64:
2100 case llvm::Triple::ppc64le:
2101 return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
2102 case llvm::Triple::amdgcn:
2103 return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
2104 case llvm::Triple::riscv32:
2105 case llvm::Triple::riscv64:
2106 return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
2107 case llvm::Triple::loongarch32:
2108 case llvm::Triple::loongarch64:
2109 return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
2110 case llvm::Triple::wasm32:
2111 case llvm::Triple::wasm64:
2112 return CheckWebAssemblyBuiltinFunctionCall(TI, BuiltinID, TheCall);
2113 case llvm::Triple::nvptx:
2114 case llvm::Triple::nvptx64:
2115 return CheckNVPTXBuiltinFunctionCall(TI, BuiltinID, TheCall);
2116 }
2117}
2118
2119// Check if \p Ty is a valid type for the elementwise math builtins. If it is
2120// not a valid type, emit an error message and return true. Otherwise return
2121// false.
2122static bool checkMathBuiltinElementType(Sema &S, SourceLocation Loc,
2123 QualType Ty) {
2124 if (!Ty->getAs<VectorType>() && !ConstantMatrixType::isValidElementType(T: Ty)) {
2125 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2126 << 1 << /* vector, integer or float ty*/ 0 << Ty;
2127 }
2128
2129 return false;
2130}
2131
2132static bool checkFPMathBuiltinElementType(Sema &S, SourceLocation Loc,
2133 QualType ArgTy, int ArgIndex) {
2134 QualType EltTy = ArgTy;
2135 if (auto *VecTy = EltTy->getAs<VectorType>())
2136 EltTy = VecTy->getElementType();
2137
2138 if (!EltTy->isRealFloatingType()) {
2139 return S.Diag(Loc, diag::err_builtin_invalid_arg_type)
2140 << ArgIndex << /* vector or float ty*/ 5 << ArgTy;
2141 }
2142
2143 return false;
2144}
2145
2146/// SemaBuiltinCpu{Supports|Is} - Handle __builtin_cpu_{supports|is}(char *).
2147/// This checks that the target supports the builtin and that the string
2148/// argument is constant and valid.
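/// For example, on x86 '__builtin_cpu_supports("avx2")' passes these checks,
/// while a non-literal argument or an unrecognized feature string is
/// diagnosed below.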
2149static bool SemaBuiltinCpu(Sema &S, const TargetInfo &TI, CallExpr *TheCall,
2150 const TargetInfo *AuxTI, unsigned BuiltinID) {
2151 assert((BuiltinID == Builtin::BI__builtin_cpu_supports ||
2152 BuiltinID == Builtin::BI__builtin_cpu_is) &&
2153 "Expecting __builtin_cpu_...");
2154
2155 bool IsCPUSupports = BuiltinID == Builtin::BI__builtin_cpu_supports;
2156 const TargetInfo *TheTI = &TI;
2157 auto SupportsBI = [=](const TargetInfo *TInfo) {
2158 return TInfo && ((IsCPUSupports && TInfo->supportsCpuSupports()) ||
2159 (!IsCPUSupports && TInfo->supportsCpuIs()));
2160 };
2161 if (!SupportsBI(&TI) && SupportsBI(AuxTI))
2162 TheTI = AuxTI;
2163
2164 if (IsCPUSupports && !TheTI->supportsCpuSupports())
2165 return S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2166 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2167 if (!IsCPUSupports && !TheTI->supportsCpuIs())
2168 return S.Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2169 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2170
2171 Expr *Arg = TheCall->getArg(Arg: 0)->IgnoreParenImpCasts();
2172 // Check if the argument is a string literal.
2173 if (!isa<StringLiteral>(Arg))
2174 return S.Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
2175 << Arg->getSourceRange();
2176
2177 // Check the contents of the string.
2178 StringRef Feature = cast<StringLiteral>(Val: Arg)->getString();
2179 if (IsCPUSupports && !TheTI->validateCpuSupports(Feature))
2180 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_supports)
2181 << Arg->getSourceRange();
2182 if (!IsCPUSupports && !TheTI->validateCpuIs(Feature))
2183 return S.Diag(TheCall->getBeginLoc(), diag::err_invalid_cpu_is)
2184 << Arg->getSourceRange();
2185 return false;
2186}
2187
2188ExprResult
2189Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
2190 CallExpr *TheCall) {
2191 ExprResult TheCallResult(TheCall);
2192
2193 // Find out if any arguments are required to be integer constant expressions.
2194 unsigned ICEArguments = 0;
2195 ASTContext::GetBuiltinTypeError Error;
2196 Context.GetBuiltinType(ID: BuiltinID, Error, IntegerConstantArgs: &ICEArguments);
2197 if (Error != ASTContext::GE_None)
2198 ICEArguments = 0; // Don't diagnose previously diagnosed errors.
2199
2200 // If any arguments are required to be ICE's, check and diagnose.
2201 for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
2202 // Skip arguments not required to be ICE's.
2203 if ((ICEArguments & (1 << ArgNo)) == 0) continue;
2204
2205 llvm::APSInt Result;
2206     // If we don't have enough arguments, continue so we can issue a better
2207     // diagnostic in checkArgCount(...).
2208 if (ArgNo < TheCall->getNumArgs() &&
2209 SemaBuiltinConstantArg(TheCall, ArgNum: ArgNo, Result))
2210 return true;
2211 ICEArguments &= ~(1 << ArgNo);
2212 }
2213
2214 FPOptions FPO;
2215 switch (BuiltinID) {
2216 case Builtin::BI__builtin_cpu_supports:
2217 case Builtin::BI__builtin_cpu_is:
2218 if (SemaBuiltinCpu(S&: *this, TI: Context.getTargetInfo(), TheCall,
2219 AuxTI: Context.getAuxTargetInfo(), BuiltinID))
2220 return ExprError();
2221 break;
2222 case Builtin::BI__builtin_cpu_init:
2223 if (!Context.getTargetInfo().supportsCpuInit()) {
2224 Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
2225 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
2226 return ExprError();
2227 }
2228 break;
2229 case Builtin::BI__builtin___CFStringMakeConstantString:
2230     // CFStringMakeConstantString is currently not implemented for GOFF (i.e.,
2231     // on z/OS) and for XCOFF (i.e., on AIX), so emit an unsupported-target error.
2232 if (CheckBuiltinTargetNotInUnsupported(
2233 S&: *this, BuiltinID, TheCall,
2234 UnsupportedObjectFormatTypes: {llvm::Triple::GOFF, llvm::Triple::XCOFF}))
2235 return ExprError();
2236 assert(TheCall->getNumArgs() == 1 &&
2237 "Wrong # arguments to builtin CFStringMakeConstantString");
2238 if (CheckObjCString(Arg: TheCall->getArg(Arg: 0)))
2239 return ExprError();
2240 break;
2241 case Builtin::BI__builtin_ms_va_start:
2242 case Builtin::BI__builtin_stdarg_start:
2243 case Builtin::BI__builtin_va_start:
2244 if (SemaBuiltinVAStart(BuiltinID, TheCall))
2245 return ExprError();
2246 break;
2247 case Builtin::BI__va_start: {
2248 switch (Context.getTargetInfo().getTriple().getArch()) {
2249 case llvm::Triple::aarch64:
2250 case llvm::Triple::arm:
2251 case llvm::Triple::thumb:
2252 if (SemaBuiltinVAStartARMMicrosoft(Call: TheCall))
2253 return ExprError();
2254 break;
2255 default:
2256 if (SemaBuiltinVAStart(BuiltinID, TheCall))
2257 return ExprError();
2258 break;
2259 }
2260 break;
2261 }
2262
2263 // The acquire, release, and no fence variants are ARM and AArch64 only.
2264 case Builtin::BI_interlockedbittestandset_acq:
2265 case Builtin::BI_interlockedbittestandset_rel:
2266 case Builtin::BI_interlockedbittestandset_nf:
2267 case Builtin::BI_interlockedbittestandreset_acq:
2268 case Builtin::BI_interlockedbittestandreset_rel:
2269 case Builtin::BI_interlockedbittestandreset_nf:
2270 if (CheckBuiltinTargetInSupported(
2271 S&: *this, BuiltinID, TheCall,
2272 SupportedArchs: {llvm::Triple::arm, llvm::Triple::thumb, llvm::Triple::aarch64}))
2273 return ExprError();
2274 break;
2275
2276 // The 64-bit bittest variants are x64, ARM, and AArch64 only.
2277 case Builtin::BI_bittest64:
2278 case Builtin::BI_bittestandcomplement64:
2279 case Builtin::BI_bittestandreset64:
2280 case Builtin::BI_bittestandset64:
2281 case Builtin::BI_interlockedbittestandreset64:
2282 case Builtin::BI_interlockedbittestandset64:
2283 if (CheckBuiltinTargetInSupported(S&: *this, BuiltinID, TheCall,
2284 SupportedArchs: {llvm::Triple::x86_64, llvm::Triple::arm,
2285 llvm::Triple::thumb,
2286 llvm::Triple::aarch64}))
2287 return ExprError();
2288 break;
2289
2290 case Builtin::BI__builtin_set_flt_rounds:
2291 if (CheckBuiltinTargetInSupported(S&: *this, BuiltinID, TheCall,
2292 SupportedArchs: {llvm::Triple::x86, llvm::Triple::x86_64,
2293 llvm::Triple::arm, llvm::Triple::thumb,
2294 llvm::Triple::aarch64}))
2295 return ExprError();
2296 break;
2297
2298 case Builtin::BI__builtin_isgreater:
2299 case Builtin::BI__builtin_isgreaterequal:
2300 case Builtin::BI__builtin_isless:
2301 case Builtin::BI__builtin_islessequal:
2302 case Builtin::BI__builtin_islessgreater:
2303 case Builtin::BI__builtin_isunordered:
2304 if (SemaBuiltinUnorderedCompare(TheCall, BuiltinID))
2305 return ExprError();
2306 break;
2307 case Builtin::BI__builtin_fpclassify:
2308 if (SemaBuiltinFPClassification(TheCall, NumArgs: 6, BuiltinID))
2309 return ExprError();
2310 break;
2311 case Builtin::BI__builtin_isfpclass:
2312 if (SemaBuiltinFPClassification(TheCall, NumArgs: 2, BuiltinID))
2313 return ExprError();
2314 break;
2315 case Builtin::BI__builtin_isfinite:
2316 case Builtin::BI__builtin_isinf:
2317 case Builtin::BI__builtin_isinf_sign:
2318 case Builtin::BI__builtin_isnan:
2319 case Builtin::BI__builtin_issignaling:
2320 case Builtin::BI__builtin_isnormal:
2321 case Builtin::BI__builtin_issubnormal:
2322 case Builtin::BI__builtin_iszero:
2323 case Builtin::BI__builtin_signbit:
2324 case Builtin::BI__builtin_signbitf:
2325 case Builtin::BI__builtin_signbitl:
2326 if (SemaBuiltinFPClassification(TheCall, NumArgs: 1, BuiltinID))
2327 return ExprError();
2328 break;
2329 case Builtin::BI__builtin_shufflevector:
2330 return SemaBuiltinShuffleVector(TheCall);
2331 // TheCall will be freed by the smart pointer here, but that's fine, since
2332 // SemaBuiltinShuffleVector guts it, but then doesn't release it.
2333 case Builtin::BI__builtin_prefetch:
2334 if (SemaBuiltinPrefetch(TheCall))
2335 return ExprError();
2336 break;
2337 case Builtin::BI__builtin_alloca_with_align:
2338 case Builtin::BI__builtin_alloca_with_align_uninitialized:
2339 if (SemaBuiltinAllocaWithAlign(TheCall))
2340 return ExprError();
2341 [[fallthrough]];
2342 case Builtin::BI__builtin_alloca:
2343 case Builtin::BI__builtin_alloca_uninitialized:
2344 Diag(TheCall->getBeginLoc(), diag::warn_alloca)
2345 << TheCall->getDirectCallee();
2346 break;
2347 case Builtin::BI__arithmetic_fence:
2348 if (SemaBuiltinArithmeticFence(TheCall))
2349 return ExprError();
2350 break;
2351 case Builtin::BI__assume:
2352 case Builtin::BI__builtin_assume:
2353 if (SemaBuiltinAssume(TheCall))
2354 return ExprError();
2355 break;
2356 case Builtin::BI__builtin_assume_aligned:
2357 if (SemaBuiltinAssumeAligned(TheCall))
2358 return ExprError();
2359 break;
2360 case Builtin::BI__builtin_dynamic_object_size:
2361 case Builtin::BI__builtin_object_size:
2362 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3))
2363 return ExprError();
2364 break;
2365 case Builtin::BI__builtin_longjmp:
2366 if (SemaBuiltinLongjmp(TheCall))
2367 return ExprError();
2368 break;
2369 case Builtin::BI__builtin_setjmp:
2370 if (SemaBuiltinSetjmp(TheCall))
2371 return ExprError();
2372 break;
2373 case Builtin::BI__builtin_classify_type:
2374 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1)) return true;
2375 TheCall->setType(Context.IntTy);
2376 break;
2377 case Builtin::BI__builtin_complex:
2378 if (SemaBuiltinComplex(TheCall))
2379 return ExprError();
2380 break;
2381 case Builtin::BI__builtin_constant_p: {
2382 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1)) return true;
2383 ExprResult Arg = DefaultFunctionArrayLvalueConversion(E: TheCall->getArg(Arg: 0));
2384 if (Arg.isInvalid()) return true;
2385 TheCall->setArg(Arg: 0, ArgExpr: Arg.get());
2386 TheCall->setType(Context.IntTy);
2387 break;
2388 }
2389 case Builtin::BI__builtin_launder:
2390 return SemaBuiltinLaunder(S&: *this, TheCall);
2391 case Builtin::BI__sync_fetch_and_add:
2392 case Builtin::BI__sync_fetch_and_add_1:
2393 case Builtin::BI__sync_fetch_and_add_2:
2394 case Builtin::BI__sync_fetch_and_add_4:
2395 case Builtin::BI__sync_fetch_and_add_8:
2396 case Builtin::BI__sync_fetch_and_add_16:
2397 case Builtin::BI__sync_fetch_and_sub:
2398 case Builtin::BI__sync_fetch_and_sub_1:
2399 case Builtin::BI__sync_fetch_and_sub_2:
2400 case Builtin::BI__sync_fetch_and_sub_4:
2401 case Builtin::BI__sync_fetch_and_sub_8:
2402 case Builtin::BI__sync_fetch_and_sub_16:
2403 case Builtin::BI__sync_fetch_and_or:
2404 case Builtin::BI__sync_fetch_and_or_1:
2405 case Builtin::BI__sync_fetch_and_or_2:
2406 case Builtin::BI__sync_fetch_and_or_4:
2407 case Builtin::BI__sync_fetch_and_or_8:
2408 case Builtin::BI__sync_fetch_and_or_16:
2409 case Builtin::BI__sync_fetch_and_and:
2410 case Builtin::BI__sync_fetch_and_and_1:
2411 case Builtin::BI__sync_fetch_and_and_2:
2412 case Builtin::BI__sync_fetch_and_and_4:
2413 case Builtin::BI__sync_fetch_and_and_8:
2414 case Builtin::BI__sync_fetch_and_and_16:
2415 case Builtin::BI__sync_fetch_and_xor:
2416 case Builtin::BI__sync_fetch_and_xor_1:
2417 case Builtin::BI__sync_fetch_and_xor_2:
2418 case Builtin::BI__sync_fetch_and_xor_4:
2419 case Builtin::BI__sync_fetch_and_xor_8:
2420 case Builtin::BI__sync_fetch_and_xor_16:
2421 case Builtin::BI__sync_fetch_and_nand:
2422 case Builtin::BI__sync_fetch_and_nand_1:
2423 case Builtin::BI__sync_fetch_and_nand_2:
2424 case Builtin::BI__sync_fetch_and_nand_4:
2425 case Builtin::BI__sync_fetch_and_nand_8:
2426 case Builtin::BI__sync_fetch_and_nand_16:
2427 case Builtin::BI__sync_add_and_fetch:
2428 case Builtin::BI__sync_add_and_fetch_1:
2429 case Builtin::BI__sync_add_and_fetch_2:
2430 case Builtin::BI__sync_add_and_fetch_4:
2431 case Builtin::BI__sync_add_and_fetch_8:
2432 case Builtin::BI__sync_add_and_fetch_16:
2433 case Builtin::BI__sync_sub_and_fetch:
2434 case Builtin::BI__sync_sub_and_fetch_1:
2435 case Builtin::BI__sync_sub_and_fetch_2:
2436 case Builtin::BI__sync_sub_and_fetch_4:
2437 case Builtin::BI__sync_sub_and_fetch_8:
2438 case Builtin::BI__sync_sub_and_fetch_16:
2439 case Builtin::BI__sync_and_and_fetch:
2440 case Builtin::BI__sync_and_and_fetch_1:
2441 case Builtin::BI__sync_and_and_fetch_2:
2442 case Builtin::BI__sync_and_and_fetch_4:
2443 case Builtin::BI__sync_and_and_fetch_8:
2444 case Builtin::BI__sync_and_and_fetch_16:
2445 case Builtin::BI__sync_or_and_fetch:
2446 case Builtin::BI__sync_or_and_fetch_1:
2447 case Builtin::BI__sync_or_and_fetch_2:
2448 case Builtin::BI__sync_or_and_fetch_4:
2449 case Builtin::BI__sync_or_and_fetch_8:
2450 case Builtin::BI__sync_or_and_fetch_16:
2451 case Builtin::BI__sync_xor_and_fetch:
2452 case Builtin::BI__sync_xor_and_fetch_1:
2453 case Builtin::BI__sync_xor_and_fetch_2:
2454 case Builtin::BI__sync_xor_and_fetch_4:
2455 case Builtin::BI__sync_xor_and_fetch_8:
2456 case Builtin::BI__sync_xor_and_fetch_16:
2457 case Builtin::BI__sync_nand_and_fetch:
2458 case Builtin::BI__sync_nand_and_fetch_1:
2459 case Builtin::BI__sync_nand_and_fetch_2:
2460 case Builtin::BI__sync_nand_and_fetch_4:
2461 case Builtin::BI__sync_nand_and_fetch_8:
2462 case Builtin::BI__sync_nand_and_fetch_16:
2463 case Builtin::BI__sync_val_compare_and_swap:
2464 case Builtin::BI__sync_val_compare_and_swap_1:
2465 case Builtin::BI__sync_val_compare_and_swap_2:
2466 case Builtin::BI__sync_val_compare_and_swap_4:
2467 case Builtin::BI__sync_val_compare_and_swap_8:
2468 case Builtin::BI__sync_val_compare_and_swap_16:
2469 case Builtin::BI__sync_bool_compare_and_swap:
2470 case Builtin::BI__sync_bool_compare_and_swap_1:
2471 case Builtin::BI__sync_bool_compare_and_swap_2:
2472 case Builtin::BI__sync_bool_compare_and_swap_4:
2473 case Builtin::BI__sync_bool_compare_and_swap_8:
2474 case Builtin::BI__sync_bool_compare_and_swap_16:
2475 case Builtin::BI__sync_lock_test_and_set:
2476 case Builtin::BI__sync_lock_test_and_set_1:
2477 case Builtin::BI__sync_lock_test_and_set_2:
2478 case Builtin::BI__sync_lock_test_and_set_4:
2479 case Builtin::BI__sync_lock_test_and_set_8:
2480 case Builtin::BI__sync_lock_test_and_set_16:
2481 case Builtin::BI__sync_lock_release:
2482 case Builtin::BI__sync_lock_release_1:
2483 case Builtin::BI__sync_lock_release_2:
2484 case Builtin::BI__sync_lock_release_4:
2485 case Builtin::BI__sync_lock_release_8:
2486 case Builtin::BI__sync_lock_release_16:
2487 case Builtin::BI__sync_swap:
2488 case Builtin::BI__sync_swap_1:
2489 case Builtin::BI__sync_swap_2:
2490 case Builtin::BI__sync_swap_4:
2491 case Builtin::BI__sync_swap_8:
2492 case Builtin::BI__sync_swap_16:
2493 return SemaBuiltinAtomicOverloaded(TheCallResult);
2494 case Builtin::BI__sync_synchronize:
2495 Diag(TheCall->getBeginLoc(), diag::warn_atomic_implicit_seq_cst)
2496 << TheCall->getCallee()->getSourceRange();
2497 break;
2498 case Builtin::BI__builtin_nontemporal_load:
2499 case Builtin::BI__builtin_nontemporal_store:
2500 return SemaBuiltinNontemporalOverloaded(TheCallResult);
2501 case Builtin::BI__builtin_memcpy_inline: {
2502 clang::Expr *SizeOp = TheCall->getArg(Arg: 2);
2503 // We warn about copying to or from `nullptr` pointers when `size` is
2504 // greater than 0. When `size` is value dependent we cannot evaluate its
2505 // value so we bail out.
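    // For example (illustrative): '__builtin_memcpy_inline(dst, nullptr, 32)'
    // warns about the null source, while a constant size of 0 suppresses the
    // check entirely.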
2506 if (SizeOp->isValueDependent())
2507 break;
2508 if (!SizeOp->EvaluateKnownConstInt(Ctx: Context).isZero()) {
2509 CheckNonNullArgument(*this, TheCall->getArg(Arg: 0), TheCall->getExprLoc());
2510 CheckNonNullArgument(*this, TheCall->getArg(Arg: 1), TheCall->getExprLoc());
2511 }
2512 break;
2513 }
2514 case Builtin::BI__builtin_memset_inline: {
2515 clang::Expr *SizeOp = TheCall->getArg(Arg: 2);
2516 // We warn about filling to `nullptr` pointers when `size` is greater than
2517 // 0. When `size` is value dependent we cannot evaluate its value so we bail
2518 // out.
2519 if (SizeOp->isValueDependent())
2520 break;
2521 if (!SizeOp->EvaluateKnownConstInt(Ctx: Context).isZero())
2522 CheckNonNullArgument(*this, TheCall->getArg(Arg: 0), TheCall->getExprLoc());
2523 break;
2524 }
2525#define BUILTIN(ID, TYPE, ATTRS)
2526#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
2527 case Builtin::BI##ID: \
2528 return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
2529#include "clang/Basic/Builtins.inc"
2530 case Builtin::BI__annotation:
2531 if (SemaBuiltinMSVCAnnotation(S&: *this, TheCall))
2532 return ExprError();
2533 break;
2534 case Builtin::BI__builtin_annotation:
2535 if (SemaBuiltinAnnotation(S&: *this, TheCall))
2536 return ExprError();
2537 break;
2538 case Builtin::BI__builtin_addressof:
2539 if (SemaBuiltinAddressof(S&: *this, TheCall))
2540 return ExprError();
2541 break;
2542 case Builtin::BI__builtin_function_start:
2543 if (SemaBuiltinFunctionStart(S&: *this, TheCall))
2544 return ExprError();
2545 break;
2546 case Builtin::BI__builtin_is_aligned:
2547 case Builtin::BI__builtin_align_up:
2548 case Builtin::BI__builtin_align_down:
2549 if (SemaBuiltinAlignment(S&: *this, TheCall, ID: BuiltinID))
2550 return ExprError();
2551 break;
2552 case Builtin::BI__builtin_add_overflow:
2553 case Builtin::BI__builtin_sub_overflow:
2554 case Builtin::BI__builtin_mul_overflow:
2555 if (SemaBuiltinOverflow(S&: *this, TheCall, BuiltinID))
2556 return ExprError();
2557 break;
2558 case Builtin::BI__builtin_operator_new:
2559 case Builtin::BI__builtin_operator_delete: {
2560 bool IsDelete = BuiltinID == Builtin::BI__builtin_operator_delete;
2561 ExprResult Res =
2562 SemaBuiltinOperatorNewDeleteOverloaded(TheCallResult, IsDelete);
2563 if (Res.isInvalid())
2564 CorrectDelayedTyposInExpr(E: TheCallResult.get());
2565 return Res;
2566 }
2567 case Builtin::BI__builtin_dump_struct:
2568 return SemaBuiltinDumpStruct(S&: *this, TheCall);
2569 case Builtin::BI__builtin_expect_with_probability: {
2570 // We first want to ensure we are called with 3 arguments
2571 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 3))
2572 return ExprError();
2573     // Then check that the probability is a constant float in range [0.0, 1.0].
2574 const Expr *ProbArg = TheCall->getArg(Arg: 2);
2575 SmallVector<PartialDiagnosticAt, 8> Notes;
2576 Expr::EvalResult Eval;
2577 Eval.Diag = &Notes;
2578 if ((!ProbArg->EvaluateAsConstantExpr(Result&: Eval, Ctx: Context)) ||
2579 !Eval.Val.isFloat()) {
2580 Diag(ProbArg->getBeginLoc(), diag::err_probability_not_constant_float)
2581 << ProbArg->getSourceRange();
2582 for (const PartialDiagnosticAt &PDiag : Notes)
2583 Diag(PDiag.first, PDiag.second);
2584 return ExprError();
2585 }
2586 llvm::APFloat Probability = Eval.Val.getFloat();
2587 bool LoseInfo = false;
2588 Probability.convert(ToSemantics: llvm::APFloat::IEEEdouble(),
2589 RM: llvm::RoundingMode::Dynamic, losesInfo: &LoseInfo);
2590 if (!(Probability >= llvm::APFloat(0.0) &&
2591 Probability <= llvm::APFloat(1.0))) {
2592 Diag(ProbArg->getBeginLoc(), diag::err_probability_out_of_range)
2593 << ProbArg->getSourceRange();
2594 return ExprError();
2595 }
2596 break;
2597 }
2598 case Builtin::BI__builtin_preserve_access_index:
2599 if (SemaBuiltinPreserveAI(S&: *this, TheCall))
2600 return ExprError();
2601 break;
2602 case Builtin::BI__builtin_call_with_static_chain:
2603 if (SemaBuiltinCallWithStaticChain(S&: *this, BuiltinCall: TheCall))
2604 return ExprError();
2605 break;
2606 case Builtin::BI__exception_code:
2607 case Builtin::BI_exception_code:
2608 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
2609 diag::err_seh___except_block))
2610 return ExprError();
2611 break;
2612 case Builtin::BI__exception_info:
2613 case Builtin::BI_exception_info:
2614 if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
2615 diag::err_seh___except_filter))
2616 return ExprError();
2617 break;
2618 case Builtin::BI__GetExceptionInfo:
2619 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
2620 return ExprError();
2621
2622 if (CheckCXXThrowOperand(
2623 ThrowLoc: TheCall->getBeginLoc(),
2624 ThrowTy: Context.getExceptionObjectType(T: FDecl->getParamDecl(i: 0)->getType()),
2625 E: TheCall))
2626 return ExprError();
2627
2628 TheCall->setType(Context.VoidPtrTy);
2629 break;
2630 case Builtin::BIaddressof:
2631 case Builtin::BI__addressof:
2632 case Builtin::BIforward:
2633 case Builtin::BIforward_like:
2634 case Builtin::BImove:
2635 case Builtin::BImove_if_noexcept:
2636 case Builtin::BIas_const: {
2637 // These are all expected to be of the form
2638 // T &/&&/* f(U &/&&)
2639 // where T and U only differ in qualification.
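    // For example, 'std::move' has the shape 'remove_reference_t<T> &&(T &&)'
    // and 'std::addressof' has the shape 'T *(T &)'; signatures that do not
    // match this pattern are diagnosed below as unsupported.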
2640 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
2641 return ExprError();
2642 QualType Param = FDecl->getParamDecl(i: 0)->getType();
2643 QualType Result = FDecl->getReturnType();
2644 bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
2645 BuiltinID == Builtin::BI__addressof;
2646 if (!(Param->isReferenceType() &&
2647 (ReturnsPointer ? Result->isAnyPointerType()
2648 : Result->isReferenceType()) &&
2649 Context.hasSameUnqualifiedType(T1: Param->getPointeeType(),
2650 T2: Result->getPointeeType()))) {
2651 Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
2652 << FDecl;
2653 return ExprError();
2654 }
2655 break;
2656 }
2657 // OpenCL v2.0, s6.13.16 - Pipe functions
2658 case Builtin::BIread_pipe:
2659 case Builtin::BIwrite_pipe:
2660     // Since those two functions are declared as variadic, we need a semantic
2661     // check for the argument.
2662 if (SemaBuiltinRWPipe(S&: *this, Call: TheCall))
2663 return ExprError();
2664 break;
2665 case Builtin::BIreserve_read_pipe:
2666 case Builtin::BIreserve_write_pipe:
2667 case Builtin::BIwork_group_reserve_read_pipe:
2668 case Builtin::BIwork_group_reserve_write_pipe:
2669 if (SemaBuiltinReserveRWPipe(S&: *this, Call: TheCall))
2670 return ExprError();
2671 break;
2672 case Builtin::BIsub_group_reserve_read_pipe:
2673 case Builtin::BIsub_group_reserve_write_pipe:
2674 if (checkOpenCLSubgroupExt(S&: *this, Call: TheCall) ||
2675 SemaBuiltinReserveRWPipe(S&: *this, Call: TheCall))
2676 return ExprError();
2677 break;
2678 case Builtin::BIcommit_read_pipe:
2679 case Builtin::BIcommit_write_pipe:
2680 case Builtin::BIwork_group_commit_read_pipe:
2681 case Builtin::BIwork_group_commit_write_pipe:
2682 if (SemaBuiltinCommitRWPipe(S&: *this, Call: TheCall))
2683 return ExprError();
2684 break;
2685 case Builtin::BIsub_group_commit_read_pipe:
2686 case Builtin::BIsub_group_commit_write_pipe:
2687 if (checkOpenCLSubgroupExt(S&: *this, Call: TheCall) ||
2688 SemaBuiltinCommitRWPipe(S&: *this, Call: TheCall))
2689 return ExprError();
2690 break;
2691 case Builtin::BIget_pipe_num_packets:
2692 case Builtin::BIget_pipe_max_packets:
2693 if (SemaBuiltinPipePackets(S&: *this, Call: TheCall))
2694 return ExprError();
2695 break;
2696 case Builtin::BIto_global:
2697 case Builtin::BIto_local:
2698 case Builtin::BIto_private:
2699 if (SemaOpenCLBuiltinToAddr(S&: *this, BuiltinID, Call: TheCall))
2700 return ExprError();
2701 break;
2702 // OpenCL v2.0, s6.13.17 - Enqueue kernel functions.
2703 case Builtin::BIenqueue_kernel:
2704 if (SemaOpenCLBuiltinEnqueueKernel(S&: *this, TheCall))
2705 return ExprError();
2706 break;
2707 case Builtin::BIget_kernel_work_group_size:
2708 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2709 if (SemaOpenCLBuiltinKernelWorkGroupSize(S&: *this, TheCall))
2710 return ExprError();
2711 break;
2712 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2713 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2714 if (SemaOpenCLBuiltinNDRangeAndBlock(S&: *this, TheCall))
2715 return ExprError();
2716 break;
2717 case Builtin::BI__builtin_os_log_format:
2718 Cleanup.setExprNeedsCleanups(true);
2719 [[fallthrough]];
2720 case Builtin::BI__builtin_os_log_format_buffer_size:
2721 if (SemaBuiltinOSLogFormat(TheCall))
2722 return ExprError();
2723 break;
2724 case Builtin::BI__builtin_frame_address:
2725 case Builtin::BI__builtin_return_address: {
2726 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 0xFFFF))
2727 return ExprError();
2728
2729     // Emit a -Wframe-address warning if a non-zero argument is passed to
2730     // __builtin_return_address or __builtin_frame_address.
2731 Expr::EvalResult Result;
2732 if (!TheCall->getArg(0)->isValueDependent() &&
2733 TheCall->getArg(0)->EvaluateAsInt(Result, getASTContext()) &&
2734 Result.Val.getInt() != 0)
2735 Diag(TheCall->getBeginLoc(), diag::warn_frame_address)
2736 << ((BuiltinID == Builtin::BI__builtin_return_address)
2737 ? "__builtin_return_address"
2738 : "__builtin_frame_address")
2739 << TheCall->getSourceRange();
2740 break;
2741 }
2742
2743 case Builtin::BI__builtin_nondeterministic_value: {
2744 if (SemaBuiltinNonDeterministicValue(TheCall))
2745 return ExprError();
2746 break;
2747 }
2748
2749 // __builtin_elementwise_abs restricts the element type to signed integers or
2750 // floating point types only.
2751 case Builtin::BI__builtin_elementwise_abs: {
2752 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2753 return ExprError();
2754
2755 QualType ArgTy = TheCall->getArg(Arg: 0)->getType();
2756 QualType EltTy = ArgTy;
2757
2758 if (auto *VecTy = EltTy->getAs<VectorType>())
2759 EltTy = VecTy->getElementType();
2760 if (EltTy->isUnsignedIntegerType()) {
2761 Diag(TheCall->getArg(0)->getBeginLoc(),
2762 diag::err_builtin_invalid_arg_type)
2763 << 1 << /* signed integer or float ty*/ 3 << ArgTy;
2764 return ExprError();
2765 }
2766 break;
2767 }
2768
2769 // These builtins restrict the element type to floating point
2770 // types only.
2771 case Builtin::BI__builtin_elementwise_ceil:
2772 case Builtin::BI__builtin_elementwise_cos:
2773 case Builtin::BI__builtin_elementwise_exp:
2774 case Builtin::BI__builtin_elementwise_exp2:
2775 case Builtin::BI__builtin_elementwise_floor:
2776 case Builtin::BI__builtin_elementwise_log:
2777 case Builtin::BI__builtin_elementwise_log2:
2778 case Builtin::BI__builtin_elementwise_log10:
2779 case Builtin::BI__builtin_elementwise_roundeven:
2780 case Builtin::BI__builtin_elementwise_round:
2781 case Builtin::BI__builtin_elementwise_rint:
2782 case Builtin::BI__builtin_elementwise_nearbyint:
2783 case Builtin::BI__builtin_elementwise_sin:
2784 case Builtin::BI__builtin_elementwise_sqrt:
2785 case Builtin::BI__builtin_elementwise_trunc:
2786 case Builtin::BI__builtin_elementwise_canonicalize: {
2787 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2788 return ExprError();
2789
2790 QualType ArgTy = TheCall->getArg(Arg: 0)->getType();
2791 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(Arg: 0)->getBeginLoc(),
2792 ArgTy, 1))
2793 return ExprError();
2794 break;
2795 }
2796 case Builtin::BI__builtin_elementwise_fma: {
2797 if (SemaBuiltinElementwiseTernaryMath(TheCall))
2798 return ExprError();
2799 break;
2800 }
2801
2802 // These builtins restrict the element type to floating point
2803 // types only, and take in two arguments.
2804 case Builtin::BI__builtin_elementwise_pow: {
2805 if (SemaBuiltinElementwiseMath(TheCall))
2806 return ExprError();
2807
2808 QualType ArgTy = TheCall->getArg(Arg: 0)->getType();
2809 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(Arg: 0)->getBeginLoc(),
2810 ArgTy, 1) ||
2811 checkFPMathBuiltinElementType(*this, TheCall->getArg(Arg: 1)->getBeginLoc(),
2812 ArgTy, 2))
2813 return ExprError();
2814 break;
2815 }
2816
2817 // These builtins restrict the element type to integer
2818 // types only.
2819 case Builtin::BI__builtin_elementwise_add_sat:
2820 case Builtin::BI__builtin_elementwise_sub_sat: {
2821 if (SemaBuiltinElementwiseMath(TheCall))
2822 return ExprError();
2823
2824 const Expr *Arg = TheCall->getArg(Arg: 0);
2825 QualType ArgTy = Arg->getType();
2826 QualType EltTy = ArgTy;
2827
2828 if (auto *VecTy = EltTy->getAs<VectorType>())
2829 EltTy = VecTy->getElementType();
2830
2831 if (!EltTy->isIntegerType()) {
2832 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2833 << 1 << /* integer ty */ 6 << ArgTy;
2834 return ExprError();
2835 }
2836 break;
2837 }
2838
2839 case Builtin::BI__builtin_elementwise_min:
2840 case Builtin::BI__builtin_elementwise_max:
2841 if (SemaBuiltinElementwiseMath(TheCall))
2842 return ExprError();
2843 break;
2844
2845 case Builtin::BI__builtin_elementwise_bitreverse: {
2846 if (PrepareBuiltinElementwiseMathOneArgCall(TheCall))
2847 return ExprError();
2848
2849 const Expr *Arg = TheCall->getArg(Arg: 0);
2850 QualType ArgTy = Arg->getType();
2851 QualType EltTy = ArgTy;
2852
2853 if (auto *VecTy = EltTy->getAs<VectorType>())
2854 EltTy = VecTy->getElementType();
2855
2856 if (!EltTy->isIntegerType()) {
2857 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2858 << 1 << /* integer ty */ 6 << ArgTy;
2859 return ExprError();
2860 }
2861 break;
2862 }
2863
2864 case Builtin::BI__builtin_elementwise_copysign: {
2865 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
2866 return ExprError();
2867
2868 ExprResult Magnitude = UsualUnaryConversions(E: TheCall->getArg(Arg: 0));
2869 ExprResult Sign = UsualUnaryConversions(E: TheCall->getArg(Arg: 1));
2870 if (Magnitude.isInvalid() || Sign.isInvalid())
2871 return ExprError();
2872
2873 QualType MagnitudeTy = Magnitude.get()->getType();
2874 QualType SignTy = Sign.get()->getType();
2875 if (checkFPMathBuiltinElementType(*this, TheCall->getArg(Arg: 0)->getBeginLoc(),
2876 MagnitudeTy, 1) ||
2877 checkFPMathBuiltinElementType(*this, TheCall->getArg(Arg: 1)->getBeginLoc(),
2878 SignTy, 2)) {
2879 return ExprError();
2880 }
2881
2882 if (MagnitudeTy.getCanonicalType() != SignTy.getCanonicalType()) {
2883 return Diag(Sign.get()->getBeginLoc(),
2884 diag::err_typecheck_call_different_arg_types)
2885 << MagnitudeTy << SignTy;
2886 }
2887
2888 TheCall->setArg(Arg: 0, ArgExpr: Magnitude.get());
2889 TheCall->setArg(Arg: 1, ArgExpr: Sign.get());
2890 TheCall->setType(Magnitude.get()->getType());
2891 break;
2892 }
2893 case Builtin::BI__builtin_reduce_max:
2894 case Builtin::BI__builtin_reduce_min: {
2895 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2896 return ExprError();
2897
2898 const Expr *Arg = TheCall->getArg(Arg: 0);
2899 const auto *TyA = Arg->getType()->getAs<VectorType>();
2900 if (!TyA) {
2901 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2902 << 1 << /* vector ty*/ 4 << Arg->getType();
2903 return ExprError();
2904 }
2905
2906 TheCall->setType(TyA->getElementType());
2907 break;
2908 }
2909
2910 // These builtins support vectors of integers only.
2911 // TODO: ADD/MUL should support floating-point types.
2912 case Builtin::BI__builtin_reduce_add:
2913 case Builtin::BI__builtin_reduce_mul:
2914 case Builtin::BI__builtin_reduce_xor:
2915 case Builtin::BI__builtin_reduce_or:
2916 case Builtin::BI__builtin_reduce_and: {
2917 if (PrepareBuiltinReduceMathOneArgCall(TheCall))
2918 return ExprError();
2919
2920 const Expr *Arg = TheCall->getArg(Arg: 0);
2921 const auto *TyA = Arg->getType()->getAs<VectorType>();
2922 if (!TyA || !TyA->getElementType()->isIntegerType()) {
2923 Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
2924 << 1 << /* vector of integers */ 6 << Arg->getType();
2925 return ExprError();
2926 }
2927 TheCall->setType(TyA->getElementType());
2928 break;
2929 }
2930
2931 case Builtin::BI__builtin_matrix_transpose:
2932 return SemaBuiltinMatrixTranspose(TheCall, CallResult: TheCallResult);
2933
2934 case Builtin::BI__builtin_matrix_column_major_load:
2935 return SemaBuiltinMatrixColumnMajorLoad(TheCall, CallResult: TheCallResult);
2936
2937 case Builtin::BI__builtin_matrix_column_major_store:
2938 return SemaBuiltinMatrixColumnMajorStore(TheCall, CallResult: TheCallResult);
2939
2940 case Builtin::BI__builtin_get_device_side_mangled_name: {
2941 auto Check = [](CallExpr *TheCall) {
2942 if (TheCall->getNumArgs() != 1)
2943 return false;
2944 auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(Arg: 0)->IgnoreImpCasts());
2945 if (!DRE)
2946 return false;
2947 auto *D = DRE->getDecl();
2948 if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
2949 return false;
2950 return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
2951 D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
2952 };
2953 if (!Check(TheCall)) {
2954 Diag(TheCall->getBeginLoc(),
2955 diag::err_hip_invalid_args_builtin_mangled_name);
2956 return ExprError();
2957 }
2958 }
2959 }
2960
2961 // Since the target specific builtins for each arch overlap, only check those
2962 // of the arch we are compiling for.
2963 if (Context.BuiltinInfo.isTSBuiltin(BuiltinID)) {
2964 if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
2965 assert(Context.getAuxTargetInfo() &&
2966 "Aux Target Builtin, but not an aux target?");
2967
2968 if (CheckTSBuiltinFunctionCall(
2969 *Context.getAuxTargetInfo(),
2970 Context.BuiltinInfo.getAuxBuiltinID(BuiltinID), TheCall))
2971 return ExprError();
2972 } else {
2973 if (CheckTSBuiltinFunctionCall(Context.getTargetInfo(), BuiltinID,
2974 TheCall))
2975 return ExprError();
2976 }
2977 }
2978
2979 return TheCallResult;
2980}
2981
2982// Get the valid immediate range for the specified NEON type code.
2983static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
2984 NeonTypeFlags Type(t);
2985 int IsQuad = ForceQuad ? true : Type.isQuad();
2986 switch (Type.getEltType()) {
2987 case NeonTypeFlags::Int8:
2988 case NeonTypeFlags::Poly8:
2989 return shift ? 7 : (8 << IsQuad) - 1;
2990 case NeonTypeFlags::Int16:
2991 case NeonTypeFlags::Poly16:
2992 return shift ? 15 : (4 << IsQuad) - 1;
2993 case NeonTypeFlags::Int32:
2994 return shift ? 31 : (2 << IsQuad) - 1;
2995 case NeonTypeFlags::Int64:
2996 case NeonTypeFlags::Poly64:
2997 return shift ? 63 : (1 << IsQuad) - 1;
2998 case NeonTypeFlags::Poly128:
2999 return shift ? 127 : (1 << IsQuad) - 1;
3000 case NeonTypeFlags::Float16:
3001 assert(!shift && "cannot shift float types!");
3002 return (4 << IsQuad) - 1;
3003 case NeonTypeFlags::Float32:
3004 assert(!shift && "cannot shift float types!");
3005 return (2 << IsQuad) - 1;
3006 case NeonTypeFlags::Float64:
3007 assert(!shift && "cannot shift float types!");
3008 return (1 << IsQuad) - 1;
3009 case NeonTypeFlags::BFloat16:
3010 assert(!shift && "cannot shift float types!");
3011 return (4 << IsQuad) - 1;
3012 }
3013 llvm_unreachable("Invalid NeonTypeFlag!");
3014}
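// Worked example (added for clarity): for Int32 with IsQuad == 1 this returns
// (2 << 1) - 1 = 3, the highest lane index of a 128-bit vector of four i32
// elements; with shift == true it instead returns 31, i.e. the element width
// minus one.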
3015
3016/// getNeonEltType - Return the QualType corresponding to the elements of
3017/// the vector type specified by the NeonTypeFlags. This is used to check
3018/// the pointer arguments for Neon load/store intrinsics.
3019static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context,
3020 bool IsPolyUnsigned, bool IsInt64Long) {
3021 switch (Flags.getEltType()) {
3022 case NeonTypeFlags::Int8:
3023 return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
3024 case NeonTypeFlags::Int16:
3025 return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
3026 case NeonTypeFlags::Int32:
3027 return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
3028 case NeonTypeFlags::Int64:
3029 if (IsInt64Long)
3030 return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy;
3031 else
3032 return Flags.isUnsigned() ? Context.UnsignedLongLongTy
3033 : Context.LongLongTy;
3034 case NeonTypeFlags::Poly8:
3035 return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy;
3036 case NeonTypeFlags::Poly16:
3037 return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy;
3038 case NeonTypeFlags::Poly64:
3039 if (IsInt64Long)
3040 return Context.UnsignedLongTy;
3041 else
3042 return Context.UnsignedLongLongTy;
3043 case NeonTypeFlags::Poly128:
3044 break;
3045 case NeonTypeFlags::Float16:
3046 return Context.HalfTy;
3047 case NeonTypeFlags::Float32:
3048 return Context.FloatTy;
3049 case NeonTypeFlags::Float64:
3050 return Context.DoubleTy;
3051 case NeonTypeFlags::BFloat16:
3052 return Context.BFloat16Ty;
3053 }
3054 llvm_unreachable("Invalid NeonTypeFlag!");
3055}
3056
3057enum ArmStreamingType {
3058 ArmNonStreaming,
3059 ArmStreaming,
3060 ArmStreamingCompatible,
3061 ArmStreamingOrSVE2p1
3062};
3063
3064enum ArmSMEState : unsigned {
3065 ArmNoState = 0,
3066
3067 ArmInZA = 0b01,
3068 ArmOutZA = 0b10,
3069 ArmInOutZA = 0b11,
3070 ArmZAMask = 0b11,
3071
3072 ArmInZT0 = 0b01 << 2,
3073 ArmOutZT0 = 0b10 << 2,
3074 ArmInOutZT0 = 0b11 << 2,
3075 ArmZT0Mask = 0b11 << 2
3076};
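// Note (added for clarity): ZA and ZT0 occupy independent two-bit in/out
// fields, so ArmInOutZA == (ArmInZA | ArmOutZA) and a builtin's ZA usage can
// be tested with (getSMEState(ID) & ArmZAMask), as CheckSMEBuiltinFunctionCall
// does below.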
3077
3078bool Sema::ParseSVEImmChecks(
3079 CallExpr *TheCall, SmallVector<std::tuple<int, int, int>, 3> &ImmChecks) {
3080 // Perform all the immediate checks for this builtin call.
3081 bool HasError = false;
3082 for (auto &I : ImmChecks) {
3083 int ArgNum, CheckTy, ElementSizeInBits;
3084 std::tie(ArgNum, CheckTy, ElementSizeInBits) = I;
3085
3086 typedef bool (*OptionSetCheckFnTy)(int64_t Value);
3087
3088 // Function that checks whether the operand (ArgNum) is an immediate
3089 // that is one of the predefined values.
3090 auto CheckImmediateInSet = [&](OptionSetCheckFnTy CheckImm,
3091 int ErrDiag) -> bool {
3092 // We can't check the value of a dependent argument.
3093 Expr *Arg = TheCall->getArg(ArgNum);
3094 if (Arg->isTypeDependent() || Arg->isValueDependent())
3095 return false;
3096
3097 // Check constant-ness first.
3098 llvm::APSInt Imm;
3099 if (SemaBuiltinConstantArg(TheCall, ArgNum, Imm))
3100 return true;
3101
3102 if (!CheckImm(Imm.getSExtValue()))
3103 return Diag(TheCall->getBeginLoc(), ErrDiag) << Arg->getSourceRange();
3104 return false;
3105 };
3106
3107 switch ((SVETypeFlags::ImmCheckType)CheckTy) {
3108 case SVETypeFlags::ImmCheck0_31:
3109 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 31))
3110 HasError = true;
3111 break;
3112 case SVETypeFlags::ImmCheck0_13:
3113 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 13))
3114 HasError = true;
3115 break;
3116 case SVETypeFlags::ImmCheck1_16:
3117 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1, High: 16))
3118 HasError = true;
3119 break;
3120 case SVETypeFlags::ImmCheck0_7:
3121 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 7))
3122 HasError = true;
3123 break;
3124 case SVETypeFlags::ImmCheck1_1:
3125 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1, High: 1))
3126 HasError = true;
3127 break;
3128 case SVETypeFlags::ImmCheck1_3:
3129 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1, High: 3))
3130 HasError = true;
3131 break;
3132 case SVETypeFlags::ImmCheck1_7:
3133 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1, High: 7))
3134 HasError = true;
3135 break;
3136 case SVETypeFlags::ImmCheckExtract:
3137 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0,
3138 High: (2048 / ElementSizeInBits) - 1))
3139 HasError = true;
3140 break;
3141 case SVETypeFlags::ImmCheckShiftRight:
3142 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1, High: ElementSizeInBits))
3143 HasError = true;
3144 break;
3145 case SVETypeFlags::ImmCheckShiftRightNarrow:
3146 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 1,
3147 High: ElementSizeInBits / 2))
3148 HasError = true;
3149 break;
3150 case SVETypeFlags::ImmCheckShiftLeft:
3151 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0,
3152 High: ElementSizeInBits - 1))
3153 HasError = true;
3154 break;
3155 case SVETypeFlags::ImmCheckLaneIndex:
3156 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0,
3157 High: (128 / (1 * ElementSizeInBits)) - 1))
3158 HasError = true;
3159 break;
3160 case SVETypeFlags::ImmCheckLaneIndexCompRotate:
3161 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0,
3162 High: (128 / (2 * ElementSizeInBits)) - 1))
3163 HasError = true;
3164 break;
3165 case SVETypeFlags::ImmCheckLaneIndexDot:
3166 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0,
3167 High: (128 / (4 * ElementSizeInBits)) - 1))
3168 HasError = true;
3169 break;
3170 case SVETypeFlags::ImmCheckComplexRot90_270:
3171 if (CheckImmediateInSet([](int64_t V) { return V == 90 || V == 270; },
3172 diag::err_rotation_argument_to_cadd))
3173 HasError = true;
3174 break;
3175 case SVETypeFlags::ImmCheckComplexRotAll90:
3176 if (CheckImmediateInSet(
3177 [](int64_t V) {
3178 return V == 0 || V == 90 || V == 180 || V == 270;
3179 },
3180 diag::err_rotation_argument_to_cmla))
3181 HasError = true;
3182 break;
3183 case SVETypeFlags::ImmCheck0_1:
3184 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 1))
3185 HasError = true;
3186 break;
3187 case SVETypeFlags::ImmCheck0_2:
3188 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 2))
3189 HasError = true;
3190 break;
3191 case SVETypeFlags::ImmCheck0_3:
3192 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 3))
3193 HasError = true;
3194 break;
3195 case SVETypeFlags::ImmCheck0_0:
3196 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 0))
3197 HasError = true;
3198 break;
3199 case SVETypeFlags::ImmCheck0_15:
3200 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 15))
3201 HasError = true;
3202 break;
3203 case SVETypeFlags::ImmCheck0_255:
3204 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: 255))
3205 HasError = true;
3206 break;
3207 case SVETypeFlags::ImmCheck2_4_Mul2:
3208 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 2, High: 4) ||
3209 SemaBuiltinConstantArgMultiple(TheCall, ArgNum, Multiple: 2))
3210 HasError = true;
3211 break;
3212 }
3213 }
3214
3215 return HasError;
3216}
3217
3218static ArmStreamingType getArmStreamingFnType(const FunctionDecl *FD) {
3219 if (FD->hasAttr<ArmLocallyStreamingAttr>())
3220 return ArmStreaming;
3221 if (const auto *T = FD->getType()->getAs<FunctionProtoType>()) {
3222 if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask)
3223 return ArmStreaming;
3224 if (T->getAArch64SMEAttributes() & FunctionType::SME_PStateSMCompatibleMask)
3225 return ArmStreamingCompatible;
3226 }
3227 return ArmNonStreaming;
3228}
3229
3230static void checkArmStreamingBuiltin(Sema &S, CallExpr *TheCall,
3231 const FunctionDecl *FD,
3232 ArmStreamingType BuiltinType) {
3233 ArmStreamingType FnType = getArmStreamingFnType(FD);
3234 if (BuiltinType == ArmStreamingOrSVE2p1) {
3235 // Check intrinsics that are available in [sve2p1 or sme/sme2].
3236 llvm::StringMap<bool> CallerFeatureMap;
3237 S.Context.getFunctionFeatureMap(CallerFeatureMap, FD);
3238 if (Builtin::evaluateRequiredTargetFeatures("sve2p1", CallerFeatureMap))
3239 BuiltinType = ArmStreamingCompatible;
3240 else
3241 BuiltinType = ArmStreaming;
3242 }
3243
3244 if (FnType == ArmStreaming && BuiltinType == ArmNonStreaming) {
3245 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3246 << TheCall->getSourceRange() << "streaming";
3247 }
3248
3249 if (FnType == ArmStreamingCompatible &&
3250 BuiltinType != ArmStreamingCompatible) {
3251 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3252 << TheCall->getSourceRange() << "streaming compatible";
3253 return;
3254 }
3255
3256 if (FnType == ArmNonStreaming && BuiltinType == ArmStreaming) {
3257 S.Diag(TheCall->getBeginLoc(), diag::warn_attribute_arm_sm_incompat_builtin)
3258 << TheCall->getSourceRange() << "non-streaming";
3259 }
3260}
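// Illustrative scenario (an assumption for clarity, not from the original
// source): a builtin classified as ArmStreaming that is called from an
// ordinary non-streaming function hits the last branch above and is reported
// with the "non-streaming" wording, while the same call made from a function
// declared with the __arm_streaming type attribute is accepted silently.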
3261
3262static bool hasArmZAState(const FunctionDecl *FD) {
3263 const auto *T = FD->getType()->getAs<FunctionProtoType>();
3264 return (T && FunctionType::getArmZAState(T->getAArch64SMEAttributes()) !=
3265 FunctionType::ARM_None) ||
3266 (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZA());
3267}
3268
3269static bool hasArmZT0State(const FunctionDecl *FD) {
3270 const auto *T = FD->getType()->getAs<FunctionProtoType>();
3271 return (T && FunctionType::getArmZT0State(T->getAArch64SMEAttributes()) !=
3272 FunctionType::ARM_None) ||
3273 (FD->hasAttr<ArmNewAttr>() && FD->getAttr<ArmNewAttr>()->isNewZT0());
3274}
3275
3276static ArmSMEState getSMEState(unsigned BuiltinID) {
3277 switch (BuiltinID) {
3278 default:
3279 return ArmNoState;
3280#define GET_SME_BUILTIN_GET_STATE
3281#include "clang/Basic/arm_sme_builtins_za_state.inc"
3282#undef GET_SME_BUILTIN_GET_STATE
3283 }
3284}
3285
3286bool Sema::CheckSMEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3287 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3288 std::optional<ArmStreamingType> BuiltinType;
3289
3290 switch (BuiltinID) {
3291#define GET_SME_STREAMING_ATTRS
3292#include "clang/Basic/arm_sme_streaming_attrs.inc"
3293#undef GET_SME_STREAMING_ATTRS
3294 }
3295
3296 if (BuiltinType)
3297 checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
3298
3299 if ((getSMEState(BuiltinID) & ArmZAMask) && !hasArmZAState(FD))
3300 Diag(TheCall->getBeginLoc(),
3301 diag::warn_attribute_arm_za_builtin_no_za_state)
3302 << TheCall->getSourceRange();
3303
3304 if ((getSMEState(BuiltinID) & ArmZT0Mask) && !hasArmZT0State(FD))
3305 Diag(TheCall->getBeginLoc(),
3306 diag::warn_attribute_arm_zt0_builtin_no_zt0_state)
3307 << TheCall->getSourceRange();
3308 }
3309
3310 // Range check SME intrinsics that take immediate values.
3311 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
3312
3313 switch (BuiltinID) {
3314 default:
3315 return false;
3316#define GET_SME_IMMEDIATE_CHECK
3317#include "clang/Basic/arm_sme_sema_rangechecks.inc"
3318#undef GET_SME_IMMEDIATE_CHECK
3319 }
3320
3321 return ParseSVEImmChecks(TheCall, ImmChecks);
3322}
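// Illustrative scenario (assumption, not from the original source): calling an
// SME builtin whose state mask includes ZA (ArmInZA/ArmOutZA) from a function
// that neither shares nor creates ZA state (no __arm_inout("za"),
// __arm_in("za"), __arm_out("za") or __arm_new("za") annotation) triggers the
// warn_attribute_arm_za_builtin_no_za_state diagnostic above; ZT0 is handled
// analogously via ArmZT0Mask.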
3323
3324bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3325 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3326 std::optional<ArmStreamingType> BuiltinType;
3327
3328 switch (BuiltinID) {
3329#define GET_SVE_STREAMING_ATTRS
3330#include "clang/Basic/arm_sve_streaming_attrs.inc"
3331#undef GET_SVE_STREAMING_ATTRS
3332 }
3333 if (BuiltinType)
3334 checkArmStreamingBuiltin(*this, TheCall, FD, *BuiltinType);
3335 }
3336 // Range check SVE intrinsics that take immediate values.
3337 SmallVector<std::tuple<int, int, int>, 3> ImmChecks;
3338
3339 switch (BuiltinID) {
3340 default:
3341 return false;
3342#define GET_SVE_IMMEDIATE_CHECK
3343#include "clang/Basic/arm_sve_sema_rangechecks.inc"
3344#undef GET_SVE_IMMEDIATE_CHECK
3345 }
3346
3347 return ParseSVEImmChecks(TheCall, ImmChecks);
3348}
3349
3350bool Sema::CheckNeonBuiltinFunctionCall(const TargetInfo &TI,
3351 unsigned BuiltinID, CallExpr *TheCall) {
3352 if (const FunctionDecl *FD = getCurFunctionDecl()) {
3353
3354 switch (BuiltinID) {
3355 default:
3356 break;
3357#define GET_NEON_BUILTINS
3358#define TARGET_BUILTIN(id, ...) case NEON::BI##id:
3359#define BUILTIN(id, ...) case NEON::BI##id:
3360#include "clang/Basic/arm_neon.inc"
3361 checkArmStreamingBuiltin(*this, TheCall, FD, ArmNonStreaming);
3362 break;
3363#undef TARGET_BUILTIN
3364#undef BUILTIN
3365#undef GET_NEON_BUILTINS
3366 }
3367 }
3368
3369 llvm::APSInt Result;
3370 uint64_t mask = 0;
3371 unsigned TV = 0;
3372 int PtrArgNum = -1;
3373 bool HasConstPtr = false;
3374 switch (BuiltinID) {
3375#define GET_NEON_OVERLOAD_CHECK
3376#include "clang/Basic/arm_neon.inc"
3377#include "clang/Basic/arm_fp16.inc"
3378#undef GET_NEON_OVERLOAD_CHECK
3379 }
3380
3381 // For NEON intrinsics which are overloaded on vector element type, validate
3382 // the immediate which specifies which variant to emit.
3383 unsigned ImmArg = TheCall->getNumArgs()-1;
3384 if (mask) {
3385 if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
3386 return true;
3387
3388 TV = Result.getLimitedValue(64);
3389 if ((TV > 63) || (mask & (1ULL << TV)) == 0)
3390 return Diag(TheCall->getBeginLoc(), diag::err_invalid_neon_type_code)
3391 << TheCall->getArg(ImmArg)->getSourceRange();
3392 }
3393
3394 if (PtrArgNum >= 0) {
3395 // Check that pointer arguments have the specified type.
3396 Expr *Arg = TheCall->getArg(PtrArgNum);
3397 if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
3398 Arg = ICE->getSubExpr();
3399 ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
3400 QualType RHSTy = RHS.get()->getType();
3401
3402 llvm::Triple::ArchType Arch = TI.getTriple().getArch();
3403 bool IsPolyUnsigned = Arch == llvm::Triple::aarch64 ||
3404 Arch == llvm::Triple::aarch64_32 ||
3405 Arch == llvm::Triple::aarch64_be;
3406 bool IsInt64Long = TI.getInt64Type() == TargetInfo::SignedLong;
3407 QualType EltTy =
3408 getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long);
3409 if (HasConstPtr)
3410 EltTy = EltTy.withConst();
3411 QualType LHSTy = Context.getPointerType(EltTy);
3412 AssignConvertType ConvTy;
3413 ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
3414 if (RHS.isInvalid())
3415 return true;
3416 if (DiagnoseAssignmentResult(ConvTy, Arg->getBeginLoc(), LHSTy, RHSTy,
3417 RHS.get(), AA_Assigning))
3418 return true;
3419 }
3420
3421 // For NEON intrinsics which take an immediate value as part of the
3422 // instruction, range check them here.
3423 unsigned i = 0, l = 0, u = 0;
3424 switch (BuiltinID) {
3425 default:
3426 return false;
3427 #define GET_NEON_IMMEDIATE_CHECK
3428 #include "clang/Basic/arm_neon.inc"
3429 #include "clang/Basic/arm_fp16.inc"
3430 #undef GET_NEON_IMMEDIATE_CHECK
3431 }
3432
3433 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
3434}
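// Illustrative example (not from the original source): the trailing range
// check is what rejects an out-of-range lane immediate, e.g.
//
//   int32x4_t v = ...;
//   vgetq_lane_s32(v, 4);   // rejected: the lane index must be in [0, 3]
//
// while vgetq_lane_s32(v, 3) is accepted.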
3435
3436bool Sema::CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
3437 switch (BuiltinID) {
3438 default:
3439 return false;
3440 #include "clang/Basic/arm_mve_builtin_sema.inc"
3441 }
3442}
3443
3444bool Sema::CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3445 CallExpr *TheCall) {
3446 bool Err = false;
3447 switch (BuiltinID) {
3448 default:
3449 return false;
3450#include "clang/Basic/arm_cde_builtin_sema.inc"
3451 }
3452
3453 if (Err)
3454 return true;
3455
3456 return CheckARMCoprocessorImmediate(TI, TheCall->getArg(0), /*WantCDE*/ true);
3457}
3458
3459bool Sema::CheckARMCoprocessorImmediate(const TargetInfo &TI,
3460 const Expr *CoprocArg, bool WantCDE) {
3461 if (isConstantEvaluatedContext())
3462 return false;
3463
3464 // We can't check the value of a dependent argument.
3465 if (CoprocArg->isTypeDependent() || CoprocArg->isValueDependent())
3466 return false;
3467
3468 llvm::APSInt CoprocNoAP = *CoprocArg->getIntegerConstantExpr(Context);
3469 int64_t CoprocNo = CoprocNoAP.getExtValue();
3470 assert(CoprocNo >= 0 && "Coprocessor immediate must be non-negative");
3471
3472 uint32_t CDECoprocMask = TI.getARMCDECoprocMask();
3473 bool IsCDECoproc = CoprocNo <= 7 && (CDECoprocMask & (1 << CoprocNo));
3474
3475 if (IsCDECoproc != WantCDE)
3476 return Diag(CoprocArg->getBeginLoc(), diag::err_arm_invalid_coproc)
3477 << (int)CoprocNo << (int)WantCDE << CoprocArg->getSourceRange();
3478
3479 return false;
3480}
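// Illustrative scenario (an assumption, not from the original source): if a
// coprocessor has been reserved for CDE via a target feature such as +cdecp0,
// passing that coprocessor number to a classic intrinsic (WantCDE == false),
// for example __builtin_arm_mcr, is rejected here, while the CDE intrinsics
// from arm_cde.h require exactly such a reserved coprocessor (WantCDE == true).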
3481
3482bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
3483 unsigned MaxWidth) {
3484 assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3485 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3486 BuiltinID == ARM::BI__builtin_arm_strex ||
3487 BuiltinID == ARM::BI__builtin_arm_stlex ||
3488 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3489 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3490 BuiltinID == AArch64::BI__builtin_arm_strex ||
3491 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
3492 "unexpected ARM builtin");
3493 bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex ||
3494 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3495 BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3496 BuiltinID == AArch64::BI__builtin_arm_ldaex;
3497
3498 DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
3499
3500 // Ensure that we have the proper number of arguments.
3501 if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2))
3502 return true;
3503
3504 // Inspect the pointer argument of the atomic builtin. This should always be
3505 // a pointer type, whose element is an integral scalar or pointer type.
3506 // Because it is a pointer type, we don't have to worry about any implicit
3507 // casts here.
3508 Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1);
3509 ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg);
3510 if (PointerArgRes.isInvalid())
3511 return true;
3512 PointerArg = PointerArgRes.get();
3513
3514 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
3515 if (!pointerType) {
3516 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
3517 << PointerArg->getType() << PointerArg->getSourceRange();
3518 return true;
3519 }
3520
3521 // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next
3522 // task is to insert the appropriate casts into the AST. First work out just
3523 // what the appropriate type is.
3524 QualType ValType = pointerType->getPointeeType();
3525 QualType AddrType = ValType.getUnqualifiedType().withVolatile();
3526 if (IsLdrex)
3527 AddrType.addConst();
3528
3529 // Issue a warning if the cast is dodgy.
3530 CastKind CastNeeded = CK_NoOp;
3531 if (!AddrType.isAtLeastAsQualifiedAs(ValType)) {
3532 CastNeeded = CK_BitCast;
3533 Diag(DRE->getBeginLoc(), diag::ext_typecheck_convert_discards_qualifiers)
3534 << PointerArg->getType() << Context.getPointerType(AddrType)
3535 << AA_Passing << PointerArg->getSourceRange();
3536 }
3537
3538 // Finally, do the cast and replace the argument with the corrected version.
3539 AddrType = Context.getPointerType(AddrType);
3540 PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded);
3541 if (PointerArgRes.isInvalid())
3542 return true;
3543 PointerArg = PointerArgRes.get();
3544
3545 TheCall->setArg(IsLdrex ? 0 : 1, PointerArg);
3546
3547 // In general, we allow ints, floats and pointers to be loaded and stored.
3548 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
3549 !ValType->isBlockPointerType() && !ValType->isFloatingType()) {
3550 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intfltptr)
3551 << PointerArg->getType() << PointerArg->getSourceRange();
3552 return true;
3553 }
3554
3555 // But ARM doesn't have instructions to deal with 128-bit versions.
3556 if (Context.getTypeSize(ValType) > MaxWidth) {
3557 assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
3558 Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
3559 << PointerArg->getType() << PointerArg->getSourceRange();
3560 return true;
3561 }
3562
3563 switch (ValType.getObjCLifetime()) {
3564 case Qualifiers::OCL_None:
3565 case Qualifiers::OCL_ExplicitNone:
3566 // okay
3567 break;
3568
3569 case Qualifiers::OCL_Weak:
3570 case Qualifiers::OCL_Strong:
3571 case Qualifiers::OCL_Autoreleasing:
3572 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
3573 << ValType << PointerArg->getSourceRange();
3574 return true;
3575 }
3576
3577 if (IsLdrex) {
3578 TheCall->setType(ValType);
3579 return false;
3580 }
3581
3582 // Initialize the argument to be stored.
3583 ExprResult ValArg = TheCall->getArg(0);
3584 InitializedEntity Entity = InitializedEntity::InitializeParameter(
3585 Context, ValType, /*consume*/ false);
3586 ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
3587 if (ValArg.isInvalid())
3588 return true;
3589 TheCall->setArg(0, ValArg.get());
3590
3591 // __builtin_arm_strex always returns an int. It's marked as such in the .def,
3592 // but the custom checker bypasses all default analysis.
3593 TheCall->setType(Context.IntTy);
3594 return false;
3595}
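// Illustrative usage (not from the original source):
//
//   int x;
//   int loaded = __builtin_arm_ldrex(&x);         // result type becomes int
//   int failed = __builtin_arm_strex(loaded, &x); // strex always returns int
//
// On 32-bit ARM the pointee may be at most 64 bits wide (MaxWidth == 64 in the
// caller); the AArch64 caller allows up to 128 bits.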
3596
3597bool Sema::CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
3598 CallExpr *TheCall) {
3599 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3600 BuiltinID == ARM::BI__builtin_arm_ldaex ||
3601 BuiltinID == ARM::BI__builtin_arm_strex ||
3602 BuiltinID == ARM::BI__builtin_arm_stlex) {
3603 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
3604 }
3605
3606 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3607 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3608 SemaBuiltinConstantArgRange(TheCall, 2, 0, 1);
3609 }
3610
3611 if (BuiltinID == ARM::BI__builtin_arm_rsr64 ||
3612 BuiltinID == ARM::BI__builtin_arm_wsr64)
3613 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false);
3614
3615 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
3616 BuiltinID == ARM::BI__builtin_arm_rsrp ||
3617 BuiltinID == ARM::BI__builtin_arm_wsr ||
3618 BuiltinID == ARM::BI__builtin_arm_wsrp)
3619 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3620
3621 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3622 return true;
3623 if (CheckMVEBuiltinFunctionCall(BuiltinID, TheCall))
3624 return true;
3625 if (CheckCDEBuiltinFunctionCall(TI, BuiltinID, TheCall))
3626 return true;
3627
3628 // For intrinsics which take an immediate value as part of the instruction,
3629 // range check them here.
3630 // FIXME: VFP Intrinsics should error if VFP not present.
3631 switch (BuiltinID) {
3632 default: return false;
3633 case ARM::BI__builtin_arm_ssat:
3634 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32);
3635 case ARM::BI__builtin_arm_usat:
3636 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
3637 case ARM::BI__builtin_arm_ssat16:
3638 return SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
3639 case ARM::BI__builtin_arm_usat16:
3640 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
3641 case ARM::BI__builtin_arm_vcvtr_f:
3642 case ARM::BI__builtin_arm_vcvtr_d:
3643 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
3644 case ARM::BI__builtin_arm_dmb:
3645 case ARM::BI__builtin_arm_dsb:
3646 case ARM::BI__builtin_arm_isb:
3647 case ARM::BI__builtin_arm_dbg:
3648 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15);
3649 case ARM::BI__builtin_arm_cdp:
3650 case ARM::BI__builtin_arm_cdp2:
3651 case ARM::BI__builtin_arm_mcr:
3652 case ARM::BI__builtin_arm_mcr2:
3653 case ARM::BI__builtin_arm_mrc:
3654 case ARM::BI__builtin_arm_mrc2:
3655 case ARM::BI__builtin_arm_mcrr:
3656 case ARM::BI__builtin_arm_mcrr2:
3657 case ARM::BI__builtin_arm_mrrc:
3658 case ARM::BI__builtin_arm_mrrc2:
3659 case ARM::BI__builtin_arm_ldc:
3660 case ARM::BI__builtin_arm_ldcl:
3661 case ARM::BI__builtin_arm_ldc2:
3662 case ARM::BI__builtin_arm_ldc2l:
3663 case ARM::BI__builtin_arm_stc:
3664 case ARM::BI__builtin_arm_stcl:
3665 case ARM::BI__builtin_arm_stc2:
3666 case ARM::BI__builtin_arm_stc2l:
3667 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 15) ||
3668 CheckARMCoprocessorImmediate(TI, TheCall->getArg(0),
3669 /*WantCDE*/ false);
3670 }
3671}
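// Illustrative example (derived from the ranges above, not from the original
// source): __builtin_arm_ssat(x, 8) saturates x to a signed 8-bit range and is
// accepted because 8 lies in [1, 32], whereas __builtin_arm_ssat(x, 0) is
// rejected; the unsigned form accepts __builtin_arm_usat(x, 31) at most.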
3672
3673bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
3674 unsigned BuiltinID,
3675 CallExpr *TheCall) {
3676 if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
3677 BuiltinID == AArch64::BI__builtin_arm_ldaex ||
3678 BuiltinID == AArch64::BI__builtin_arm_strex ||
3679 BuiltinID == AArch64::BI__builtin_arm_stlex) {
3680 return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
3681 }
3682
3683 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
3684 return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) ||
3685 SemaBuiltinConstantArgRange(TheCall, 2, 0, 3) ||
3686 SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) ||
3687 SemaBuiltinConstantArgRange(TheCall, 4, 0, 1);
3688 }
3689
3690 if (BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
3691 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
3692 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
3693 BuiltinID == AArch64::BI__builtin_arm_wsr128)
3694 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3695
3696 // Memory Tagging Extensions (MTE) Intrinsics
3697 if (BuiltinID == AArch64::BI__builtin_arm_irg ||
3698 BuiltinID == AArch64::BI__builtin_arm_addg ||
3699 BuiltinID == AArch64::BI__builtin_arm_gmi ||
3700 BuiltinID == AArch64::BI__builtin_arm_ldg ||
3701 BuiltinID == AArch64::BI__builtin_arm_stg ||
3702 BuiltinID == AArch64::BI__builtin_arm_subp) {
3703 return SemaBuiltinARMMemoryTaggingCall(BuiltinID, TheCall);
3704 }
3705
3706 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
3707 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
3708 BuiltinID == AArch64::BI__builtin_arm_wsr ||
3709 BuiltinID == AArch64::BI__builtin_arm_wsrp)
3710 return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true);
3711
3712 // Only check the valid encoding range. Any constant in this range would be
3713 // converted to a register of the form S1_2_C3_C4_5. Let the hardware throw
3714 // an exception for incorrect registers. This matches MSVC behavior.
3715 if (BuiltinID == AArch64::BI_ReadStatusReg ||
3716 BuiltinID == AArch64::BI_WriteStatusReg)
3717 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0x7fff);
3718
3719 if (BuiltinID == AArch64::BI__getReg)
3720 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
3721
3722 if (BuiltinID == AArch64::BI__break)
3723 return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
3724
3725 if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
3726 return true;
3727
3728 if (CheckSVEBuiltinFunctionCall(BuiltinID, TheCall))
3729 return true;
3730
3731 if (CheckSMEBuiltinFunctionCall(BuiltinID, TheCall))
3732 return true;
3733
3734 // For intrinsics which take an immediate value as part of the instruction,
3735 // range check them here.
3736 unsigned i = 0, l = 0, u = 0;
3737 switch (BuiltinID) {
3738 default: return false;
3739 case AArch64::BI__builtin_arm_dmb:
3740 case AArch64::BI__builtin_arm_dsb:
3741 case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break;
3742 case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break;
3743 }
3744
3745 return SemaBuiltinConstantArgRange(TheCall, i, l, u + l);
3746}
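// Illustrative example (derived from the table above, not from the original
// source): __builtin_arm_dmb(0xF), the full-system barrier encoding, passes
// the [0, 15] immediate check, while __builtin_arm_dmb(16) is rejected.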
3747
3748static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
3749 if (Arg->getType()->getAsPlaceholderType())
3750 return false;
3751
3752 // The first argument needs to be a record field access.
3753 // If it is an array element access, we delay decision
3754 // to BPF backend to check whether the access is a
3755 // field access or not.
3756 return (Arg->IgnoreParens()->getObjectKind() == OK_BitField ||
3757 isa<MemberExpr>(Arg->IgnoreParens()) ||
3758 isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
3759}
3760
3761static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
3762 QualType ArgType = Arg->getType();
3763 if (ArgType->getAsPlaceholderType())
3764 return false;
3765
3766 // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
3767 // format:
3768 // 1. __builtin_preserve_type_info(*(<type> *)0, flag);
3769 // 2. <type> var;
3770 // __builtin_preserve_type_info(var, flag);
3771 if (!isa<DeclRefExpr>(Arg->IgnoreParens()) &&
3772 !isa<UnaryOperator>(Arg->IgnoreParens()))
3773 return false;
3774
3775 // Typedef type.
3776 if (ArgType->getAs<TypedefType>())
3777 return true;
3778
3779 // Record type or Enum type.
3780 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3781 if (const auto *RT = Ty->getAs<RecordType>()) {
3782 if (!RT->getDecl()->getDeclName().isEmpty())
3783 return true;
3784 } else if (const auto *ET = Ty->getAs<EnumType>()) {
3785 if (!ET->getDecl()->getDeclName().isEmpty())
3786 return true;
3787 }
3788
3789 return false;
3790}
3791
3792static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
3793 QualType ArgType = Arg->getType();
3794 if (ArgType->getAsPlaceholderType())
3795 return false;
3796
3797 // for ENUM_VALUE_EXISTENCE/ENUM_VALUE reloc type
3798 // format:
3799 // __builtin_preserve_enum_value(*(<enum_type> *)<enum_value>,
3800 // flag);
3801 const auto *UO = dyn_cast<UnaryOperator>(Arg->IgnoreParens());
3802 if (!UO)
3803 return false;
3804
3805 const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
3806 if (!CE)
3807 return false;
3808 if (CE->getCastKind() != CK_IntegralToPointer &&
3809 CE->getCastKind() != CK_NullToPointer)
3810 return false;
3811
3812 // The integer must be from an EnumConstantDecl.
3813 const auto *DR = dyn_cast<DeclRefExpr>(CE->getSubExpr());
3814 if (!DR)
3815 return false;
3816
3817 const EnumConstantDecl *Enumerator =
3818 dyn_cast<EnumConstantDecl>(DR->getDecl());
3819 if (!Enumerator)
3820 return false;
3821
3822 // The type must be EnumType.
3823 const Type *Ty = ArgType->getUnqualifiedDesugaredType();
3824 const auto *ET = Ty->getAs<EnumType>();
3825 if (!ET)
3826 return false;
3827
3828 // The enum value must be supported.
3829 return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator);
3830}
3831
3832bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID,
3833 CallExpr *TheCall) {
3834 assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
3835 BuiltinID == BPF::BI__builtin_btf_type_id ||
3836 BuiltinID == BPF::BI__builtin_preserve_type_info ||
3837 BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
3838 "unexpected BPF builtin");
3839
3840 if (checkArgCount(*this, TheCall, 2))
3841 return true;
3842
3843 // The second argument needs to be a constant int
3844 Expr *Arg = TheCall->getArg(1);
3845 std::optional<llvm::APSInt> Value = Arg->getIntegerConstantExpr(Context);
3846 diag::kind kind;
3847 if (!Value) {
3848 if (BuiltinID == BPF::BI__builtin_preserve_field_info)
3849 kind = diag::err_preserve_field_info_not_const;
3850 else if (BuiltinID == BPF::BI__builtin_btf_type_id)
3851 kind = diag::err_btf_type_id_not_const;
3852 else if (BuiltinID == BPF::BI__builtin_preserve_type_info)
3853 kind = diag::err_preserve_type_info_not_const;
3854 else
3855 kind = diag::err_preserve_enum_value_not_const;
3856 Diag(Arg->getBeginLoc(), kind) << 2 << Arg->getSourceRange();
3857 return true;
3858 }
3859
3860 // The first argument
3861 Arg = TheCall->getArg(0);
3862 bool InvalidArg = false;
3863 bool ReturnUnsignedInt = true;
3864 if (BuiltinID == BPF::BI__builtin_preserve_field_info) {
3865 if (!isValidBPFPreserveFieldInfoArg(Arg)) {
3866 InvalidArg = true;
3867 kind = diag::err_preserve_field_info_not_field;
3868 }
3869 } else if (BuiltinID == BPF::BI__builtin_preserve_type_info) {
3870 if (!isValidBPFPreserveTypeInfoArg(Arg)) {
3871 InvalidArg = true;
3872 kind = diag::err_preserve_type_info_invalid;
3873 }
3874 } else if (BuiltinID == BPF::BI__builtin_preserve_enum_value) {
3875 if (!isValidBPFPreserveEnumValueArg(Arg)) {
3876 InvalidArg = true;
3877 kind = diag::err_preserve_enum_value_invalid;
3878 }
3879 ReturnUnsignedInt = false;
3880 } else if (BuiltinID == BPF::BI__builtin_btf_type_id) {
3881 ReturnUnsignedInt = false;
3882 }
3883
3884 if (InvalidArg) {
3885 Diag(Arg->getBeginLoc(), kind) << 1 << Arg->getSourceRange();
3886 return true;
3887 }
3888
3889 if (ReturnUnsignedInt)
3890 TheCall->setType(Context.UnsignedIntTy);
3891 else
3892 TheCall->setType(Context.UnsignedLongTy);
3893 return false;
3894}
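// Illustrative usage (not from the original source):
//
//   struct S { int a; } *s = ...;
//   unsigned off = __builtin_preserve_field_info(s->a, 1);  // field access, constant flag
//
// A non-constant second argument, or a first argument that is not a field or
// array access, is diagnosed above; __builtin_btf_type_id results are typed
// unsigned long instead of unsigned int.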
3895
3896bool Sema::CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
3897 struct ArgInfo {
3898 uint8_t OpNum;
3899 bool IsSigned;
3900 uint8_t BitWidth;
3901 uint8_t Align;
3902 };
3903 struct BuiltinInfo {
3904 unsigned BuiltinID;
3905 ArgInfo Infos[2];
3906 };
3907
3908 static BuiltinInfo Infos[] = {
3909 { .BuiltinID: Hexagon::BI__builtin_circ_ldd, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 3 }} },
3910 { .BuiltinID: Hexagon::BI__builtin_circ_ldw, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 2 }} },
3911 { .BuiltinID: Hexagon::BI__builtin_circ_ldh, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3912 { .BuiltinID: Hexagon::BI__builtin_circ_lduh, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3913 { .BuiltinID: Hexagon::BI__builtin_circ_ldb, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3914 { .BuiltinID: Hexagon::BI__builtin_circ_ldub, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3915 { .BuiltinID: Hexagon::BI__builtin_circ_std, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 3 }} },
3916 { .BuiltinID: Hexagon::BI__builtin_circ_stw, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 2 }} },
3917 { .BuiltinID: Hexagon::BI__builtin_circ_sth, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3918 { .BuiltinID: Hexagon::BI__builtin_circ_sthhi, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3919 { .BuiltinID: Hexagon::BI__builtin_circ_stb, .Infos: {{ .OpNum: 3, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3920
3921 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3922 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3923 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3924 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3925 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 2 }} },
3926 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 3 }} },
3927 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 0 }} },
3928 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3929 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 1 }} },
3930 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 2 }} },
3931 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 4, .Align: 3 }} },
3932
3933 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A2_combineii, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3934 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A2_tfrih, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 16, .Align: 0 }} },
3935 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A2_tfril, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 16, .Align: 0 }} },
3936 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A2_tfrpi, .Infos: {{ .OpNum: 0, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3937 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_bitspliti, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3938 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 8, .Align: 0 }} },
3939 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3940 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_cround_ri, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3941 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_round_ri, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3942 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3943 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 8, .Align: 0 }} },
3944 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3945 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 7, .Align: 0 }} },
3946 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3947 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3948 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 7, .Align: 0 }} },
3949 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3950 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3951 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 7, .Align: 0 }} },
3952 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_C2_bitsclri, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3953 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_C2_muxii, .Infos: {{ .OpNum: 2, .IsSigned: true, .BitWidth: 8, .Align: 0 }} },
3954 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3955 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_dfclass, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3956 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, .Infos: {{ .OpNum: 0, .IsSigned: false, .BitWidth: 10, .Align: 0 }} },
3957 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, .Infos: {{ .OpNum: 0, .IsSigned: false, .BitWidth: 10, .Align: 0 }} },
3958 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_sfclass, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3959 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, .Infos: {{ .OpNum: 0, .IsSigned: false, .BitWidth: 10, .Align: 0 }} },
3960 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, .Infos: {{ .OpNum: 0, .IsSigned: false, .BitWidth: 10, .Align: 0 }} },
3961 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3962 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 2 }} },
3963 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
3964 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3965 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3966 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3967 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3968 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3969 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3970 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3971 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3972 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3973 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3974 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3975 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3976 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3977 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
3978 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3979 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3980 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3981 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3982 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3983 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3984 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax,
3985 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3986 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
3987 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3988 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3989 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3990 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3991 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3992 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax,
3993 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3994 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3995 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3996 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
3997 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3998 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
3999 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_extractu, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 },
4000 { .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4001 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_extractup, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 },
4002 { .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4003 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_insert, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 },
4004 { .OpNum: 3, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4005 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_insertp, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 },
4006 { .OpNum: 3, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4007 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4008 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4009 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4010 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4011 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4012 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4013 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4014 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4015 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4016 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4017 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4018 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4019 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4020 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4021 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_setbit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4022 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax,
4023 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 4, .Align: 0 },
4024 { .OpNum: 3, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4025 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax,
4026 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 4, .Align: 0 },
4027 { .OpNum: 3, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4028 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax,
4029 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 4, .Align: 0 },
4030 { .OpNum: 3, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4031 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax,
4032 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 4, .Align: 0 },
4033 { .OpNum: 3, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4034 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4035 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4036 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_valignib, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4037 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S2_vspliceib, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4038 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4039 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4040 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4041 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4042 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_clbaddi, .Infos: {{ .OpNum: 1, .IsSigned: true , .BitWidth: 6, .Align: 0 }} },
4043 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, .Infos: {{ .OpNum: 1, .IsSigned: true, .BitWidth: 6, .Align: 0 }} },
4044 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_extract, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 },
4045 { .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4046 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_extractp, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 },
4047 { .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4048 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_lsli, .Infos: {{ .OpNum: 0, .IsSigned: true, .BitWidth: 6, .Align: 0 }} },
4049 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4050 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4051 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4052 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4053 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4054 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4055 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4056 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax,
4057 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4058 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4059 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax,
4060 .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 4, .Align: 0 }} },
4061 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4062 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4063 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4064 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4065 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4066 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 6, .Align: 0 }} },
4067 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, .Infos: {{ .OpNum: 1, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4068 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4069 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4070 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4071 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4072 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 5, .Align: 0 }} },
4073 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_valignbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4074 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4075 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4076 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4077 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4078 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4079 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4080 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B,
4081 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4082 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4083 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4084 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4085 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B,
4086 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4087 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4088 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4089 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4090 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B,
4091 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 1, .Align: 0 }} },
4092
4093 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4094 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_128B,
4095 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4096 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx,
4097 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4098 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B,
4099 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4100 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4101 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_128B,
4102 .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4103 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx,
4104 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4105 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B,
4106 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 2, .Align: 0 }} },
4107 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4108 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvbi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4109 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4110 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvvb_oracci_128B,
4111 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4112 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4113 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwhi_128B, .Infos: {{ .OpNum: 2, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4114 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci, .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4115 { .BuiltinID: Hexagon::BI__builtin_HEXAGON_V6_vlutvwh_oracci_128B,
4116 .Infos: {{ .OpNum: 3, .IsSigned: false, .BitWidth: 3, .Align: 0 }} },
4117 };
4118
4119 // Use a dynamically initialized static to sort the table exactly once on
4120 // first run.
4121 static const bool SortOnce =
4122 (llvm::sort(C&: Infos,
4123 Comp: [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) {
4124 return LHS.BuiltinID < RHS.BuiltinID;
4125 }),
4126 true);
4127 (void)SortOnce;
4128
4129 const BuiltinInfo *F = llvm::partition_point(
4130 Range&: Infos, P: [=](const BuiltinInfo &BI) { return BI.BuiltinID < BuiltinID; });
4131 if (F == std::end(arr&: Infos) || F->BuiltinID != BuiltinID)
4132 return false;
4133
4134 bool Error = false;
4135
4136 for (const ArgInfo &A : F->Infos) {
4137 // Ignore empty ArgInfo elements.
4138 if (A.BitWidth == 0)
4139 continue;
4140
4141 int32_t Min = A.IsSigned ? -(1 << (A.BitWidth - 1)) : 0;
4142 int32_t Max = (1 << (A.IsSigned ? A.BitWidth - 1 : A.BitWidth)) - 1;
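      // e.g. an unsigned 6-bit field with Align == 0 yields the range [0, 63];
      // a 4-bit unsigned field with Align == 2 is scaled to [0, 60] and must
      // also be a multiple of 4.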
4143 if (!A.Align) {
4144 Error |= SemaBuiltinConstantArgRange(TheCall, ArgNum: A.OpNum, Low: Min, High: Max);
4145 } else {
4146 unsigned M = 1 << A.Align;
4147 Min *= M;
4148 Max *= M;
4149 Error |= SemaBuiltinConstantArgRange(TheCall, ArgNum: A.OpNum, Low: Min, High: Max);
4150 Error |= SemaBuiltinConstantArgMultiple(TheCall, ArgNum: A.OpNum, Multiple: M);
4151 }
4152 }
4153 return Error;
4154}
4155
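// A usage sketch based on the Hexagon table above: S6_rol_i_r takes an
// unsigned 5-bit immediate, so __builtin_HEXAGON_S6_rol_i_r(X, 31) is
// accepted while __builtin_HEXAGON_S6_rol_i_r(X, 32) is diagnosed as out of
// range.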
4156bool Sema::CheckHexagonBuiltinFunctionCall(unsigned BuiltinID,
4157 CallExpr *TheCall) {
4158 return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
4159}
4160
4161bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
4162 unsigned BuiltinID,
4163 CallExpr *TheCall) {
4164 switch (BuiltinID) {
4165 default:
4166 break;
4167 // Basic intrinsics.
4168 case LoongArch::BI__builtin_loongarch_cacop_d:
4169 case LoongArch::BI__builtin_loongarch_cacop_w: {
4170 SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: llvm::maxUIntN(N: 5));
4171 SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: llvm::minIntN(N: 12),
4172 High: llvm::maxIntN(N: 12));
4173 break;
4174 }
4175 case LoongArch::BI__builtin_loongarch_break:
4176 case LoongArch::BI__builtin_loongarch_dbar:
4177 case LoongArch::BI__builtin_loongarch_ibar:
4178 case LoongArch::BI__builtin_loongarch_syscall:
4179 // Check if immediate is in [0, 32767].
4180 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 32767);
4181 case LoongArch::BI__builtin_loongarch_csrrd_w:
4182 case LoongArch::BI__builtin_loongarch_csrrd_d:
4183 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 16383);
4184 case LoongArch::BI__builtin_loongarch_csrwr_w:
4185 case LoongArch::BI__builtin_loongarch_csrwr_d:
4186 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 16383);
4187 case LoongArch::BI__builtin_loongarch_csrxchg_w:
4188 case LoongArch::BI__builtin_loongarch_csrxchg_d:
4189 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 16383);
4190 case LoongArch::BI__builtin_loongarch_lddir_d:
4191 case LoongArch::BI__builtin_loongarch_ldpte_d:
4192 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4193 case LoongArch::BI__builtin_loongarch_movfcsr2gr:
4194 case LoongArch::BI__builtin_loongarch_movgr2fcsr:
4195 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: llvm::maxUIntN(N: 2));
4196
4197 // LSX intrinsics.
4198 case LoongArch::BI__builtin_lsx_vbitclri_b:
4199 case LoongArch::BI__builtin_lsx_vbitrevi_b:
4200 case LoongArch::BI__builtin_lsx_vbitseti_b:
4201 case LoongArch::BI__builtin_lsx_vsat_b:
4202 case LoongArch::BI__builtin_lsx_vsat_bu:
4203 case LoongArch::BI__builtin_lsx_vslli_b:
4204 case LoongArch::BI__builtin_lsx_vsrai_b:
4205 case LoongArch::BI__builtin_lsx_vsrari_b:
4206 case LoongArch::BI__builtin_lsx_vsrli_b:
4207 case LoongArch::BI__builtin_lsx_vsllwil_h_b:
4208 case LoongArch::BI__builtin_lsx_vsllwil_hu_bu:
4209 case LoongArch::BI__builtin_lsx_vrotri_b:
4210 case LoongArch::BI__builtin_lsx_vsrlri_b:
4211 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4212 case LoongArch::BI__builtin_lsx_vbitclri_h:
4213 case LoongArch::BI__builtin_lsx_vbitrevi_h:
4214 case LoongArch::BI__builtin_lsx_vbitseti_h:
4215 case LoongArch::BI__builtin_lsx_vsat_h:
4216 case LoongArch::BI__builtin_lsx_vsat_hu:
4217 case LoongArch::BI__builtin_lsx_vslli_h:
4218 case LoongArch::BI__builtin_lsx_vsrai_h:
4219 case LoongArch::BI__builtin_lsx_vsrari_h:
4220 case LoongArch::BI__builtin_lsx_vsrli_h:
4221 case LoongArch::BI__builtin_lsx_vsllwil_w_h:
4222 case LoongArch::BI__builtin_lsx_vsllwil_wu_hu:
4223 case LoongArch::BI__builtin_lsx_vrotri_h:
4224 case LoongArch::BI__builtin_lsx_vsrlri_h:
4225 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4226 case LoongArch::BI__builtin_lsx_vssrarni_b_h:
4227 case LoongArch::BI__builtin_lsx_vssrarni_bu_h:
4228 case LoongArch::BI__builtin_lsx_vssrani_b_h:
4229 case LoongArch::BI__builtin_lsx_vssrani_bu_h:
4230 case LoongArch::BI__builtin_lsx_vsrarni_b_h:
4231 case LoongArch::BI__builtin_lsx_vsrlni_b_h:
4232 case LoongArch::BI__builtin_lsx_vsrlrni_b_h:
4233 case LoongArch::BI__builtin_lsx_vssrlni_b_h:
4234 case LoongArch::BI__builtin_lsx_vssrlni_bu_h:
4235 case LoongArch::BI__builtin_lsx_vssrlrni_b_h:
4236 case LoongArch::BI__builtin_lsx_vssrlrni_bu_h:
4237 case LoongArch::BI__builtin_lsx_vsrani_b_h:
4238 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4239 case LoongArch::BI__builtin_lsx_vslei_bu:
4240 case LoongArch::BI__builtin_lsx_vslei_hu:
4241 case LoongArch::BI__builtin_lsx_vslei_wu:
4242 case LoongArch::BI__builtin_lsx_vslei_du:
4243 case LoongArch::BI__builtin_lsx_vslti_bu:
4244 case LoongArch::BI__builtin_lsx_vslti_hu:
4245 case LoongArch::BI__builtin_lsx_vslti_wu:
4246 case LoongArch::BI__builtin_lsx_vslti_du:
4247 case LoongArch::BI__builtin_lsx_vmaxi_bu:
4248 case LoongArch::BI__builtin_lsx_vmaxi_hu:
4249 case LoongArch::BI__builtin_lsx_vmaxi_wu:
4250 case LoongArch::BI__builtin_lsx_vmaxi_du:
4251 case LoongArch::BI__builtin_lsx_vmini_bu:
4252 case LoongArch::BI__builtin_lsx_vmini_hu:
4253 case LoongArch::BI__builtin_lsx_vmini_wu:
4254 case LoongArch::BI__builtin_lsx_vmini_du:
4255 case LoongArch::BI__builtin_lsx_vaddi_bu:
4256 case LoongArch::BI__builtin_lsx_vaddi_hu:
4257 case LoongArch::BI__builtin_lsx_vaddi_wu:
4258 case LoongArch::BI__builtin_lsx_vaddi_du:
4259 case LoongArch::BI__builtin_lsx_vbitclri_w:
4260 case LoongArch::BI__builtin_lsx_vbitrevi_w:
4261 case LoongArch::BI__builtin_lsx_vbitseti_w:
4262 case LoongArch::BI__builtin_lsx_vsat_w:
4263 case LoongArch::BI__builtin_lsx_vsat_wu:
4264 case LoongArch::BI__builtin_lsx_vslli_w:
4265 case LoongArch::BI__builtin_lsx_vsrai_w:
4266 case LoongArch::BI__builtin_lsx_vsrari_w:
4267 case LoongArch::BI__builtin_lsx_vsrli_w:
4268 case LoongArch::BI__builtin_lsx_vsllwil_d_w:
4269 case LoongArch::BI__builtin_lsx_vsllwil_du_wu:
4270 case LoongArch::BI__builtin_lsx_vsrlri_w:
4271 case LoongArch::BI__builtin_lsx_vrotri_w:
4272 case LoongArch::BI__builtin_lsx_vsubi_bu:
4273 case LoongArch::BI__builtin_lsx_vsubi_hu:
4274 case LoongArch::BI__builtin_lsx_vbsrl_v:
4275 case LoongArch::BI__builtin_lsx_vbsll_v:
4276 case LoongArch::BI__builtin_lsx_vsubi_wu:
4277 case LoongArch::BI__builtin_lsx_vsubi_du:
4278 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4279 case LoongArch::BI__builtin_lsx_vssrarni_h_w:
4280 case LoongArch::BI__builtin_lsx_vssrarni_hu_w:
4281 case LoongArch::BI__builtin_lsx_vssrani_h_w:
4282 case LoongArch::BI__builtin_lsx_vssrani_hu_w:
4283 case LoongArch::BI__builtin_lsx_vsrarni_h_w:
4284 case LoongArch::BI__builtin_lsx_vsrani_h_w:
4285 case LoongArch::BI__builtin_lsx_vfrstpi_b:
4286 case LoongArch::BI__builtin_lsx_vfrstpi_h:
4287 case LoongArch::BI__builtin_lsx_vsrlni_h_w:
4288 case LoongArch::BI__builtin_lsx_vsrlrni_h_w:
4289 case LoongArch::BI__builtin_lsx_vssrlni_h_w:
4290 case LoongArch::BI__builtin_lsx_vssrlni_hu_w:
4291 case LoongArch::BI__builtin_lsx_vssrlrni_h_w:
4292 case LoongArch::BI__builtin_lsx_vssrlrni_hu_w:
4293 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
4294 case LoongArch::BI__builtin_lsx_vbitclri_d:
4295 case LoongArch::BI__builtin_lsx_vbitrevi_d:
4296 case LoongArch::BI__builtin_lsx_vbitseti_d:
4297 case LoongArch::BI__builtin_lsx_vsat_d:
4298 case LoongArch::BI__builtin_lsx_vsat_du:
4299 case LoongArch::BI__builtin_lsx_vslli_d:
4300 case LoongArch::BI__builtin_lsx_vsrai_d:
4301 case LoongArch::BI__builtin_lsx_vsrli_d:
4302 case LoongArch::BI__builtin_lsx_vsrari_d:
4303 case LoongArch::BI__builtin_lsx_vrotri_d:
4304 case LoongArch::BI__builtin_lsx_vsrlri_d:
4305 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 63);
4306 case LoongArch::BI__builtin_lsx_vssrarni_w_d:
4307 case LoongArch::BI__builtin_lsx_vssrarni_wu_d:
4308 case LoongArch::BI__builtin_lsx_vssrani_w_d:
4309 case LoongArch::BI__builtin_lsx_vssrani_wu_d:
4310 case LoongArch::BI__builtin_lsx_vsrarni_w_d:
4311 case LoongArch::BI__builtin_lsx_vsrlni_w_d:
4312 case LoongArch::BI__builtin_lsx_vsrlrni_w_d:
4313 case LoongArch::BI__builtin_lsx_vssrlni_w_d:
4314 case LoongArch::BI__builtin_lsx_vssrlni_wu_d:
4315 case LoongArch::BI__builtin_lsx_vssrlrni_w_d:
4316 case LoongArch::BI__builtin_lsx_vssrlrni_wu_d:
4317 case LoongArch::BI__builtin_lsx_vsrani_w_d:
4318 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 63);
4319 case LoongArch::BI__builtin_lsx_vssrarni_d_q:
4320 case LoongArch::BI__builtin_lsx_vssrarni_du_q:
4321 case LoongArch::BI__builtin_lsx_vssrani_d_q:
4322 case LoongArch::BI__builtin_lsx_vssrani_du_q:
4323 case LoongArch::BI__builtin_lsx_vsrarni_d_q:
4324 case LoongArch::BI__builtin_lsx_vssrlni_d_q:
4325 case LoongArch::BI__builtin_lsx_vssrlni_du_q:
4326 case LoongArch::BI__builtin_lsx_vssrlrni_d_q:
4327 case LoongArch::BI__builtin_lsx_vssrlrni_du_q:
4328 case LoongArch::BI__builtin_lsx_vsrani_d_q:
4329 case LoongArch::BI__builtin_lsx_vsrlrni_d_q:
4330 case LoongArch::BI__builtin_lsx_vsrlni_d_q:
4331 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 127);
4332 case LoongArch::BI__builtin_lsx_vseqi_b:
4333 case LoongArch::BI__builtin_lsx_vseqi_h:
4334 case LoongArch::BI__builtin_lsx_vseqi_w:
4335 case LoongArch::BI__builtin_lsx_vseqi_d:
4336 case LoongArch::BI__builtin_lsx_vslti_b:
4337 case LoongArch::BI__builtin_lsx_vslti_h:
4338 case LoongArch::BI__builtin_lsx_vslti_w:
4339 case LoongArch::BI__builtin_lsx_vslti_d:
4340 case LoongArch::BI__builtin_lsx_vslei_b:
4341 case LoongArch::BI__builtin_lsx_vslei_h:
4342 case LoongArch::BI__builtin_lsx_vslei_w:
4343 case LoongArch::BI__builtin_lsx_vslei_d:
4344 case LoongArch::BI__builtin_lsx_vmaxi_b:
4345 case LoongArch::BI__builtin_lsx_vmaxi_h:
4346 case LoongArch::BI__builtin_lsx_vmaxi_w:
4347 case LoongArch::BI__builtin_lsx_vmaxi_d:
4348 case LoongArch::BI__builtin_lsx_vmini_b:
4349 case LoongArch::BI__builtin_lsx_vmini_h:
4350 case LoongArch::BI__builtin_lsx_vmini_w:
4351 case LoongArch::BI__builtin_lsx_vmini_d:
4352 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -16, High: 15);
4353 case LoongArch::BI__builtin_lsx_vandi_b:
4354 case LoongArch::BI__builtin_lsx_vnori_b:
4355 case LoongArch::BI__builtin_lsx_vori_b:
4356 case LoongArch::BI__builtin_lsx_vshuf4i_b:
4357 case LoongArch::BI__builtin_lsx_vshuf4i_h:
4358 case LoongArch::BI__builtin_lsx_vshuf4i_w:
4359 case LoongArch::BI__builtin_lsx_vxori_b:
4360 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 255);
4361 case LoongArch::BI__builtin_lsx_vbitseli_b:
4362 case LoongArch::BI__builtin_lsx_vshuf4i_d:
4363 case LoongArch::BI__builtin_lsx_vextrins_b:
4364 case LoongArch::BI__builtin_lsx_vextrins_h:
4365 case LoongArch::BI__builtin_lsx_vextrins_w:
4366 case LoongArch::BI__builtin_lsx_vextrins_d:
4367 case LoongArch::BI__builtin_lsx_vpermi_w:
4368 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 255);
4369 case LoongArch::BI__builtin_lsx_vpickve2gr_b:
4370 case LoongArch::BI__builtin_lsx_vpickve2gr_bu:
4371 case LoongArch::BI__builtin_lsx_vreplvei_b:
4372 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4373 case LoongArch::BI__builtin_lsx_vinsgr2vr_b:
4374 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4375 case LoongArch::BI__builtin_lsx_vpickve2gr_h:
4376 case LoongArch::BI__builtin_lsx_vpickve2gr_hu:
4377 case LoongArch::BI__builtin_lsx_vreplvei_h:
4378 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4379 case LoongArch::BI__builtin_lsx_vinsgr2vr_h:
4380 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
4381 case LoongArch::BI__builtin_lsx_vpickve2gr_w:
4382 case LoongArch::BI__builtin_lsx_vpickve2gr_wu:
4383 case LoongArch::BI__builtin_lsx_vreplvei_w:
4384 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
4385 case LoongArch::BI__builtin_lsx_vinsgr2vr_w:
4386 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
4387 case LoongArch::BI__builtin_lsx_vpickve2gr_d:
4388 case LoongArch::BI__builtin_lsx_vpickve2gr_du:
4389 case LoongArch::BI__builtin_lsx_vreplvei_d:
4390 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
4391 case LoongArch::BI__builtin_lsx_vinsgr2vr_d:
4392 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 1);
4393 case LoongArch::BI__builtin_lsx_vstelm_b:
4394 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -128, High: 127) ||
4395 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 15);
4396 case LoongArch::BI__builtin_lsx_vstelm_h:
4397 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -256, High: 254) ||
4398 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
4399 case LoongArch::BI__builtin_lsx_vstelm_w:
4400 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -512, High: 508) ||
4401 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 3);
4402 case LoongArch::BI__builtin_lsx_vstelm_d:
4403 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -1024, High: 1016) ||
4404 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 1);
4405 case LoongArch::BI__builtin_lsx_vldrepl_b:
4406 case LoongArch::BI__builtin_lsx_vld:
4407 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2047);
4408 case LoongArch::BI__builtin_lsx_vldrepl_h:
4409 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2046);
4410 case LoongArch::BI__builtin_lsx_vldrepl_w:
4411 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2044);
4412 case LoongArch::BI__builtin_lsx_vldrepl_d:
4413 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2040);
4414 case LoongArch::BI__builtin_lsx_vst:
4415 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -2048, High: 2047);
4416 case LoongArch::BI__builtin_lsx_vldi:
4417 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -4096, High: 4095);
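  // e.g. __builtin_lsx_vldi(4095) is accepted, while __builtin_lsx_vldi(4096)
  // falls outside [-4096, 4095] and is diagnosed.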
4418 case LoongArch::BI__builtin_lsx_vrepli_b:
4419 case LoongArch::BI__builtin_lsx_vrepli_h:
4420 case LoongArch::BI__builtin_lsx_vrepli_w:
4421 case LoongArch::BI__builtin_lsx_vrepli_d:
4422 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -512, High: 511);
4423
4424 // LASX intrinsics.
4425 case LoongArch::BI__builtin_lasx_xvbitclri_b:
4426 case LoongArch::BI__builtin_lasx_xvbitrevi_b:
4427 case LoongArch::BI__builtin_lasx_xvbitseti_b:
4428 case LoongArch::BI__builtin_lasx_xvsat_b:
4429 case LoongArch::BI__builtin_lasx_xvsat_bu:
4430 case LoongArch::BI__builtin_lasx_xvslli_b:
4431 case LoongArch::BI__builtin_lasx_xvsrai_b:
4432 case LoongArch::BI__builtin_lasx_xvsrari_b:
4433 case LoongArch::BI__builtin_lasx_xvsrli_b:
4434 case LoongArch::BI__builtin_lasx_xvsllwil_h_b:
4435 case LoongArch::BI__builtin_lasx_xvsllwil_hu_bu:
4436 case LoongArch::BI__builtin_lasx_xvrotri_b:
4437 case LoongArch::BI__builtin_lasx_xvsrlri_b:
4438 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4439 case LoongArch::BI__builtin_lasx_xvbitclri_h:
4440 case LoongArch::BI__builtin_lasx_xvbitrevi_h:
4441 case LoongArch::BI__builtin_lasx_xvbitseti_h:
4442 case LoongArch::BI__builtin_lasx_xvsat_h:
4443 case LoongArch::BI__builtin_lasx_xvsat_hu:
4444 case LoongArch::BI__builtin_lasx_xvslli_h:
4445 case LoongArch::BI__builtin_lasx_xvsrai_h:
4446 case LoongArch::BI__builtin_lasx_xvsrari_h:
4447 case LoongArch::BI__builtin_lasx_xvsrli_h:
4448 case LoongArch::BI__builtin_lasx_xvsllwil_w_h:
4449 case LoongArch::BI__builtin_lasx_xvsllwil_wu_hu:
4450 case LoongArch::BI__builtin_lasx_xvrotri_h:
4451 case LoongArch::BI__builtin_lasx_xvsrlri_h:
4452 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4453 case LoongArch::BI__builtin_lasx_xvssrarni_b_h:
4454 case LoongArch::BI__builtin_lasx_xvssrarni_bu_h:
4455 case LoongArch::BI__builtin_lasx_xvssrani_b_h:
4456 case LoongArch::BI__builtin_lasx_xvssrani_bu_h:
4457 case LoongArch::BI__builtin_lasx_xvsrarni_b_h:
4458 case LoongArch::BI__builtin_lasx_xvsrlni_b_h:
4459 case LoongArch::BI__builtin_lasx_xvsrlrni_b_h:
4460 case LoongArch::BI__builtin_lasx_xvssrlni_b_h:
4461 case LoongArch::BI__builtin_lasx_xvssrlni_bu_h:
4462 case LoongArch::BI__builtin_lasx_xvssrlrni_b_h:
4463 case LoongArch::BI__builtin_lasx_xvssrlrni_bu_h:
4464 case LoongArch::BI__builtin_lasx_xvsrani_b_h:
4465 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
4466 case LoongArch::BI__builtin_lasx_xvslei_bu:
4467 case LoongArch::BI__builtin_lasx_xvslei_hu:
4468 case LoongArch::BI__builtin_lasx_xvslei_wu:
4469 case LoongArch::BI__builtin_lasx_xvslei_du:
4470 case LoongArch::BI__builtin_lasx_xvslti_bu:
4471 case LoongArch::BI__builtin_lasx_xvslti_hu:
4472 case LoongArch::BI__builtin_lasx_xvslti_wu:
4473 case LoongArch::BI__builtin_lasx_xvslti_du:
4474 case LoongArch::BI__builtin_lasx_xvmaxi_bu:
4475 case LoongArch::BI__builtin_lasx_xvmaxi_hu:
4476 case LoongArch::BI__builtin_lasx_xvmaxi_wu:
4477 case LoongArch::BI__builtin_lasx_xvmaxi_du:
4478 case LoongArch::BI__builtin_lasx_xvmini_bu:
4479 case LoongArch::BI__builtin_lasx_xvmini_hu:
4480 case LoongArch::BI__builtin_lasx_xvmini_wu:
4481 case LoongArch::BI__builtin_lasx_xvmini_du:
4482 case LoongArch::BI__builtin_lasx_xvaddi_bu:
4483 case LoongArch::BI__builtin_lasx_xvaddi_hu:
4484 case LoongArch::BI__builtin_lasx_xvaddi_wu:
4485 case LoongArch::BI__builtin_lasx_xvaddi_du:
4486 case LoongArch::BI__builtin_lasx_xvbitclri_w:
4487 case LoongArch::BI__builtin_lasx_xvbitrevi_w:
4488 case LoongArch::BI__builtin_lasx_xvbitseti_w:
4489 case LoongArch::BI__builtin_lasx_xvsat_w:
4490 case LoongArch::BI__builtin_lasx_xvsat_wu:
4491 case LoongArch::BI__builtin_lasx_xvslli_w:
4492 case LoongArch::BI__builtin_lasx_xvsrai_w:
4493 case LoongArch::BI__builtin_lasx_xvsrari_w:
4494 case LoongArch::BI__builtin_lasx_xvsrli_w:
4495 case LoongArch::BI__builtin_lasx_xvsllwil_d_w:
4496 case LoongArch::BI__builtin_lasx_xvsllwil_du_wu:
4497 case LoongArch::BI__builtin_lasx_xvsrlri_w:
4498 case LoongArch::BI__builtin_lasx_xvrotri_w:
4499 case LoongArch::BI__builtin_lasx_xvsubi_bu:
4500 case LoongArch::BI__builtin_lasx_xvsubi_hu:
4501 case LoongArch::BI__builtin_lasx_xvsubi_wu:
4502 case LoongArch::BI__builtin_lasx_xvsubi_du:
4503 case LoongArch::BI__builtin_lasx_xvbsrl_v:
4504 case LoongArch::BI__builtin_lasx_xvbsll_v:
4505 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 31);
4506 case LoongArch::BI__builtin_lasx_xvssrarni_h_w:
4507 case LoongArch::BI__builtin_lasx_xvssrarni_hu_w:
4508 case LoongArch::BI__builtin_lasx_xvssrani_h_w:
4509 case LoongArch::BI__builtin_lasx_xvssrani_hu_w:
4510 case LoongArch::BI__builtin_lasx_xvsrarni_h_w:
4511 case LoongArch::BI__builtin_lasx_xvsrani_h_w:
4512 case LoongArch::BI__builtin_lasx_xvfrstpi_b:
4513 case LoongArch::BI__builtin_lasx_xvfrstpi_h:
4514 case LoongArch::BI__builtin_lasx_xvsrlni_h_w:
4515 case LoongArch::BI__builtin_lasx_xvsrlrni_h_w:
4516 case LoongArch::BI__builtin_lasx_xvssrlni_h_w:
4517 case LoongArch::BI__builtin_lasx_xvssrlni_hu_w:
4518 case LoongArch::BI__builtin_lasx_xvssrlrni_h_w:
4519 case LoongArch::BI__builtin_lasx_xvssrlrni_hu_w:
4520 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
4521 case LoongArch::BI__builtin_lasx_xvbitclri_d:
4522 case LoongArch::BI__builtin_lasx_xvbitrevi_d:
4523 case LoongArch::BI__builtin_lasx_xvbitseti_d:
4524 case LoongArch::BI__builtin_lasx_xvsat_d:
4525 case LoongArch::BI__builtin_lasx_xvsat_du:
4526 case LoongArch::BI__builtin_lasx_xvslli_d:
4527 case LoongArch::BI__builtin_lasx_xvsrai_d:
4528 case LoongArch::BI__builtin_lasx_xvsrli_d:
4529 case LoongArch::BI__builtin_lasx_xvsrari_d:
4530 case LoongArch::BI__builtin_lasx_xvrotri_d:
4531 case LoongArch::BI__builtin_lasx_xvsrlri_d:
4532 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 63);
4533 case LoongArch::BI__builtin_lasx_xvssrarni_w_d:
4534 case LoongArch::BI__builtin_lasx_xvssrarni_wu_d:
4535 case LoongArch::BI__builtin_lasx_xvssrani_w_d:
4536 case LoongArch::BI__builtin_lasx_xvssrani_wu_d:
4537 case LoongArch::BI__builtin_lasx_xvsrarni_w_d:
4538 case LoongArch::BI__builtin_lasx_xvsrlni_w_d:
4539 case LoongArch::BI__builtin_lasx_xvsrlrni_w_d:
4540 case LoongArch::BI__builtin_lasx_xvssrlni_w_d:
4541 case LoongArch::BI__builtin_lasx_xvssrlni_wu_d:
4542 case LoongArch::BI__builtin_lasx_xvssrlrni_w_d:
4543 case LoongArch::BI__builtin_lasx_xvssrlrni_wu_d:
4544 case LoongArch::BI__builtin_lasx_xvsrani_w_d:
4545 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 63);
4546 case LoongArch::BI__builtin_lasx_xvssrarni_d_q:
4547 case LoongArch::BI__builtin_lasx_xvssrarni_du_q:
4548 case LoongArch::BI__builtin_lasx_xvssrani_d_q:
4549 case LoongArch::BI__builtin_lasx_xvssrani_du_q:
4550 case LoongArch::BI__builtin_lasx_xvsrarni_d_q:
4551 case LoongArch::BI__builtin_lasx_xvssrlni_d_q:
4552 case LoongArch::BI__builtin_lasx_xvssrlni_du_q:
4553 case LoongArch::BI__builtin_lasx_xvssrlrni_d_q:
4554 case LoongArch::BI__builtin_lasx_xvssrlrni_du_q:
4555 case LoongArch::BI__builtin_lasx_xvsrani_d_q:
4556 case LoongArch::BI__builtin_lasx_xvsrlni_d_q:
4557 case LoongArch::BI__builtin_lasx_xvsrlrni_d_q:
4558 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 127);
4559 case LoongArch::BI__builtin_lasx_xvseqi_b:
4560 case LoongArch::BI__builtin_lasx_xvseqi_h:
4561 case LoongArch::BI__builtin_lasx_xvseqi_w:
4562 case LoongArch::BI__builtin_lasx_xvseqi_d:
4563 case LoongArch::BI__builtin_lasx_xvslti_b:
4564 case LoongArch::BI__builtin_lasx_xvslti_h:
4565 case LoongArch::BI__builtin_lasx_xvslti_w:
4566 case LoongArch::BI__builtin_lasx_xvslti_d:
4567 case LoongArch::BI__builtin_lasx_xvslei_b:
4568 case LoongArch::BI__builtin_lasx_xvslei_h:
4569 case LoongArch::BI__builtin_lasx_xvslei_w:
4570 case LoongArch::BI__builtin_lasx_xvslei_d:
4571 case LoongArch::BI__builtin_lasx_xvmaxi_b:
4572 case LoongArch::BI__builtin_lasx_xvmaxi_h:
4573 case LoongArch::BI__builtin_lasx_xvmaxi_w:
4574 case LoongArch::BI__builtin_lasx_xvmaxi_d:
4575 case LoongArch::BI__builtin_lasx_xvmini_b:
4576 case LoongArch::BI__builtin_lasx_xvmini_h:
4577 case LoongArch::BI__builtin_lasx_xvmini_w:
4578 case LoongArch::BI__builtin_lasx_xvmini_d:
4579 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -16, High: 15);
4580 case LoongArch::BI__builtin_lasx_xvandi_b:
4581 case LoongArch::BI__builtin_lasx_xvnori_b:
4582 case LoongArch::BI__builtin_lasx_xvori_b:
4583 case LoongArch::BI__builtin_lasx_xvshuf4i_b:
4584 case LoongArch::BI__builtin_lasx_xvshuf4i_h:
4585 case LoongArch::BI__builtin_lasx_xvshuf4i_w:
4586 case LoongArch::BI__builtin_lasx_xvxori_b:
4587 case LoongArch::BI__builtin_lasx_xvpermi_d:
4588 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 255);
4589 case LoongArch::BI__builtin_lasx_xvbitseli_b:
4590 case LoongArch::BI__builtin_lasx_xvshuf4i_d:
4591 case LoongArch::BI__builtin_lasx_xvextrins_b:
4592 case LoongArch::BI__builtin_lasx_xvextrins_h:
4593 case LoongArch::BI__builtin_lasx_xvextrins_w:
4594 case LoongArch::BI__builtin_lasx_xvextrins_d:
4595 case LoongArch::BI__builtin_lasx_xvpermi_q:
4596 case LoongArch::BI__builtin_lasx_xvpermi_w:
4597 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 255);
4598 case LoongArch::BI__builtin_lasx_xvrepl128vei_b:
4599 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
4600 case LoongArch::BI__builtin_lasx_xvrepl128vei_h:
4601 case LoongArch::BI__builtin_lasx_xvpickve2gr_w:
4602 case LoongArch::BI__builtin_lasx_xvpickve2gr_wu:
4603 case LoongArch::BI__builtin_lasx_xvpickve_w_f:
4604 case LoongArch::BI__builtin_lasx_xvpickve_w:
4605 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 7);
4606 case LoongArch::BI__builtin_lasx_xvinsgr2vr_w:
4607 case LoongArch::BI__builtin_lasx_xvinsve0_w:
4608 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
4609 case LoongArch::BI__builtin_lasx_xvrepl128vei_w:
4610 case LoongArch::BI__builtin_lasx_xvpickve2gr_d:
4611 case LoongArch::BI__builtin_lasx_xvpickve2gr_du:
4612 case LoongArch::BI__builtin_lasx_xvpickve_d_f:
4613 case LoongArch::BI__builtin_lasx_xvpickve_d:
4614 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
4615 case LoongArch::BI__builtin_lasx_xvinsve0_d:
4616 case LoongArch::BI__builtin_lasx_xvinsgr2vr_d:
4617 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
4618 case LoongArch::BI__builtin_lasx_xvstelm_b:
4619 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -128, High: 127) ||
4620 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 31);
4621 case LoongArch::BI__builtin_lasx_xvstelm_h:
4622 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -256, High: 254) ||
4623 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 15);
4624 case LoongArch::BI__builtin_lasx_xvstelm_w:
4625 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -512, High: 508) ||
4626 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
4627 case LoongArch::BI__builtin_lasx_xvstelm_d:
4628 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -1024, High: 1016) ||
4629 SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 3);
4630 case LoongArch::BI__builtin_lasx_xvrepl128vei_d:
4631 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
4632 case LoongArch::BI__builtin_lasx_xvldrepl_b:
4633 case LoongArch::BI__builtin_lasx_xvld:
4634 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2047);
4635 case LoongArch::BI__builtin_lasx_xvldrepl_h:
4636 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2046);
4637 case LoongArch::BI__builtin_lasx_xvldrepl_w:
4638 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2044);
4639 case LoongArch::BI__builtin_lasx_xvldrepl_d:
4640 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: -2048, High: 2040);
4641 case LoongArch::BI__builtin_lasx_xvst:
4642 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: -2048, High: 2047);
4643 case LoongArch::BI__builtin_lasx_xvldi:
4644 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -4096, High: 4095);
4645 case LoongArch::BI__builtin_lasx_xvrepli_b:
4646 case LoongArch::BI__builtin_lasx_xvrepli_h:
4647 case LoongArch::BI__builtin_lasx_xvrepli_w:
4648 case LoongArch::BI__builtin_lasx_xvrepli_d:
4649 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: -512, High: 511);
4650 }
4651 return false;
4652}
4653
4654bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
4655 unsigned BuiltinID, CallExpr *TheCall) {
4656 return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
4657 CheckMipsBuiltinArgument(BuiltinID, TheCall);
4658}
4659
4660bool Sema::CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
4661 CallExpr *TheCall) {
4662
4663 if (Mips::BI__builtin_mips_addu_qb <= BuiltinID &&
4664 BuiltinID <= Mips::BI__builtin_mips_lwx) {
4665 if (!TI.hasFeature("dsp"))
4666 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_dsp);
4667 }
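  // e.g. __builtin_mips_addu_qb compiled without the 'dsp' target feature is
  // rejected by the check above.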
4668
4669 if (Mips::BI__builtin_mips_absq_s_qb <= BuiltinID &&
4670 BuiltinID <= Mips::BI__builtin_mips_subuh_r_qb) {
4671 if (!TI.hasFeature("dspr2"))
4672 return Diag(TheCall->getBeginLoc(),
4673 diag::err_mips_builtin_requires_dspr2);
4674 }
4675
4676 if (Mips::BI__builtin_msa_add_a_b <= BuiltinID &&
4677 BuiltinID <= Mips::BI__builtin_msa_xori_b) {
4678 if (!TI.hasFeature("msa"))
4679 return Diag(TheCall->getBeginLoc(), diag::err_mips_builtin_requires_msa);
4680 }
4681
4682 return false;
4683}
4684
4685 // CheckMipsBuiltinArgument - Checks that the constant value passed to the
4686 // intrinsic is correct. The switch statement is ordered by DSP, then MSA. The
4687 // ordering within DSP is unspecified. MSA is ordered by the data format used
4688 // by the underlying instruction, i.e., df/m, df/n, and then by size.
4689//
4690// FIXME: The size tests here should instead be tablegen'd along with the
4691// definitions from include/clang/Basic/BuiltinsMips.def.
4692// FIXME: GCC is strict on signedness for some of these intrinsics, we should
4693// be too.
4694bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) {
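  // i is the index of the operand to check, [l, u] is the allowed range for
  // the immediate, and a non-zero m additionally requires the constant to be
  // a multiple of m.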
4695 unsigned i = 0, l = 0, u = 0, m = 0;
4696 switch (BuiltinID) {
4697 default: return false;
4698 case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break;
4699 case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break;
4700 case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break;
4701 case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break;
4702 case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break;
4703 case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break;
4704 case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break;
4705 // MSA intrinsics. Instructions (which the intrinsics map to) that use the
4706 // df/m field.
4707 // These intrinsics take an unsigned 3 bit immediate.
4708 case Mips::BI__builtin_msa_bclri_b:
4709 case Mips::BI__builtin_msa_bnegi_b:
4710 case Mips::BI__builtin_msa_bseti_b:
4711 case Mips::BI__builtin_msa_sat_s_b:
4712 case Mips::BI__builtin_msa_sat_u_b:
4713 case Mips::BI__builtin_msa_slli_b:
4714 case Mips::BI__builtin_msa_srai_b:
4715 case Mips::BI__builtin_msa_srari_b:
4716 case Mips::BI__builtin_msa_srli_b:
4717 case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break;
4718 case Mips::BI__builtin_msa_binsli_b:
4719 case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break;
4720 // These intrinsics take an unsigned 4 bit immediate.
4721 case Mips::BI__builtin_msa_bclri_h:
4722 case Mips::BI__builtin_msa_bnegi_h:
4723 case Mips::BI__builtin_msa_bseti_h:
4724 case Mips::BI__builtin_msa_sat_s_h:
4725 case Mips::BI__builtin_msa_sat_u_h:
4726 case Mips::BI__builtin_msa_slli_h:
4727 case Mips::BI__builtin_msa_srai_h:
4728 case Mips::BI__builtin_msa_srari_h:
4729 case Mips::BI__builtin_msa_srli_h:
4730 case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break;
4731 case Mips::BI__builtin_msa_binsli_h:
4732 case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break;
4733 // These intrinsics take an unsigned 5 bit immediate.
4734 // The first block of intrinsics actually have an unsigned 5 bit field,
4735 // not a df/n field.
4736 case Mips::BI__builtin_msa_cfcmsa:
4737 case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break;
4738 case Mips::BI__builtin_msa_clei_u_b:
4739 case Mips::BI__builtin_msa_clei_u_h:
4740 case Mips::BI__builtin_msa_clei_u_w:
4741 case Mips::BI__builtin_msa_clei_u_d:
4742 case Mips::BI__builtin_msa_clti_u_b:
4743 case Mips::BI__builtin_msa_clti_u_h:
4744 case Mips::BI__builtin_msa_clti_u_w:
4745 case Mips::BI__builtin_msa_clti_u_d:
4746 case Mips::BI__builtin_msa_maxi_u_b:
4747 case Mips::BI__builtin_msa_maxi_u_h:
4748 case Mips::BI__builtin_msa_maxi_u_w:
4749 case Mips::BI__builtin_msa_maxi_u_d:
4750 case Mips::BI__builtin_msa_mini_u_b:
4751 case Mips::BI__builtin_msa_mini_u_h:
4752 case Mips::BI__builtin_msa_mini_u_w:
4753 case Mips::BI__builtin_msa_mini_u_d:
4754 case Mips::BI__builtin_msa_addvi_b:
4755 case Mips::BI__builtin_msa_addvi_h:
4756 case Mips::BI__builtin_msa_addvi_w:
4757 case Mips::BI__builtin_msa_addvi_d:
4758 case Mips::BI__builtin_msa_bclri_w:
4759 case Mips::BI__builtin_msa_bnegi_w:
4760 case Mips::BI__builtin_msa_bseti_w:
4761 case Mips::BI__builtin_msa_sat_s_w:
4762 case Mips::BI__builtin_msa_sat_u_w:
4763 case Mips::BI__builtin_msa_slli_w:
4764 case Mips::BI__builtin_msa_srai_w:
4765 case Mips::BI__builtin_msa_srari_w:
4766 case Mips::BI__builtin_msa_srli_w:
4767 case Mips::BI__builtin_msa_srlri_w:
4768 case Mips::BI__builtin_msa_subvi_b:
4769 case Mips::BI__builtin_msa_subvi_h:
4770 case Mips::BI__builtin_msa_subvi_w:
4771 case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break;
4772 case Mips::BI__builtin_msa_binsli_w:
4773 case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break;
4774 // These intrinsics take an unsigned 6 bit immediate.
4775 case Mips::BI__builtin_msa_bclri_d:
4776 case Mips::BI__builtin_msa_bnegi_d:
4777 case Mips::BI__builtin_msa_bseti_d:
4778 case Mips::BI__builtin_msa_sat_s_d:
4779 case Mips::BI__builtin_msa_sat_u_d:
4780 case Mips::BI__builtin_msa_slli_d:
4781 case Mips::BI__builtin_msa_srai_d:
4782 case Mips::BI__builtin_msa_srari_d:
4783 case Mips::BI__builtin_msa_srli_d:
4784 case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break;
4785 case Mips::BI__builtin_msa_binsli_d:
4786 case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break;
4787 // These intrinsics take a signed 5 bit immediate.
4788 case Mips::BI__builtin_msa_ceqi_b:
4789 case Mips::BI__builtin_msa_ceqi_h:
4790 case Mips::BI__builtin_msa_ceqi_w:
4791 case Mips::BI__builtin_msa_ceqi_d:
4792 case Mips::BI__builtin_msa_clti_s_b:
4793 case Mips::BI__builtin_msa_clti_s_h:
4794 case Mips::BI__builtin_msa_clti_s_w:
4795 case Mips::BI__builtin_msa_clti_s_d:
4796 case Mips::BI__builtin_msa_clei_s_b:
4797 case Mips::BI__builtin_msa_clei_s_h:
4798 case Mips::BI__builtin_msa_clei_s_w:
4799 case Mips::BI__builtin_msa_clei_s_d:
4800 case Mips::BI__builtin_msa_maxi_s_b:
4801 case Mips::BI__builtin_msa_maxi_s_h:
4802 case Mips::BI__builtin_msa_maxi_s_w:
4803 case Mips::BI__builtin_msa_maxi_s_d:
4804 case Mips::BI__builtin_msa_mini_s_b:
4805 case Mips::BI__builtin_msa_mini_s_h:
4806 case Mips::BI__builtin_msa_mini_s_w:
4807 case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break;
4808 // These intrinsics take an unsigned 8 bit immediate.
4809 case Mips::BI__builtin_msa_andi_b:
4810 case Mips::BI__builtin_msa_nori_b:
4811 case Mips::BI__builtin_msa_ori_b:
4812 case Mips::BI__builtin_msa_shf_b:
4813 case Mips::BI__builtin_msa_shf_h:
4814 case Mips::BI__builtin_msa_shf_w:
4815 case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break;
4816 case Mips::BI__builtin_msa_bseli_b:
4817 case Mips::BI__builtin_msa_bmnzi_b:
4818 case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break;
4819 // df/n format
4820 // These intrinsics take an unsigned 4 bit immediate.
4821 case Mips::BI__builtin_msa_copy_s_b:
4822 case Mips::BI__builtin_msa_copy_u_b:
4823 case Mips::BI__builtin_msa_insve_b:
4824 case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
4825 case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
4826 // These intrinsics take an unsigned 3 bit immediate.
4827 case Mips::BI__builtin_msa_copy_s_h:
4828 case Mips::BI__builtin_msa_copy_u_h:
4829 case Mips::BI__builtin_msa_insve_h:
4830 case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
4831 case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
4832 // These intrinsics take an unsigned 2 bit immediate.
4833 case Mips::BI__builtin_msa_copy_s_w:
4834 case Mips::BI__builtin_msa_copy_u_w:
4835 case Mips::BI__builtin_msa_insve_w:
4836 case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
4837 case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
4838 // These intrinsics take an unsigned 1 bit immediate.
4839 case Mips::BI__builtin_msa_copy_s_d:
4840 case Mips::BI__builtin_msa_copy_u_d:
4841 case Mips::BI__builtin_msa_insve_d:
4842 case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
4843 case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
4844 // Memory offsets and immediate loads.
4845 // These intrinsics take a signed 10 bit immediate.
4846 case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
4847 case Mips::BI__builtin_msa_ldi_h:
4848 case Mips::BI__builtin_msa_ldi_w:
4849 case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
4850 case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break;
4851 case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break;
4852 case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break;
4853 case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break;
4854 case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break;
4855 case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break;
4856 case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break;
4857 case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break;
4858 case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break;
4859 case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break;
4860 case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break;
4861 case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break;
4862 }
4863
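  // e.g. __builtin_msa_ld_w takes a constant offset in [-2048, 2044] that must
  // also be a multiple of 4.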
4864 if (!m)
4865 return SemaBuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u);
4866
4867 return SemaBuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u) ||
4868 SemaBuiltinConstantArgMultiple(TheCall, ArgNum: i, Multiple: m);
4869}
4870
4871/// DecodePPCMMATypeFromStr - This decodes one PPC MMA type descriptor from Str,
4872/// advancing the pointer over the consumed characters. The decoded type is
4873/// returned. If the decoded type represents a constant integer with a
4874/// constraint on its value then Mask is set to that value. The type descriptors
4875/// used in Str are specific to PPC MMA builtins and are documented in the file
4876/// defining the PPC builtins.
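/// For example (assuming the descriptors used in BuiltinsPPC.def), "W512*"
/// decodes to a pointer to the 512-bit MMA accumulator type declared in
/// PPCTypes.def, and "i15" decodes to 'int' while setting Mask to 15.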
4877static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
4878 unsigned &Mask) {
4879 bool RequireICE = false;
4880 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
4881 switch (*Str++) {
4882 case 'V':
4883 return Context.getVectorType(VectorType: Context.UnsignedCharTy, NumElts: 16,
4884 VecKind: VectorKind::AltiVecVector);
4885 case 'i': {
4886 char *End;
4887 unsigned size = strtoul(nptr: Str, endptr: &End, base: 10);
4888 assert(End != Str && "Missing constant parameter constraint");
4889 Str = End;
4890 Mask = size;
4891 return Context.IntTy;
4892 }
4893 case 'W': {
4894 char *End;
4895 unsigned size = strtoul(nptr: Str, endptr: &End, base: 10);
4896 assert(End != Str && "Missing PowerPC MMA type size");
4897 Str = End;
4898 QualType Type;
4899 switch (size) {
4900 #define PPC_VECTOR_TYPE(typeName, Id, size) \
4901 case size: Type = Context.Id##Ty; break;
4902 #include "clang/Basic/PPCTypes.def"
4903 default: llvm_unreachable("Invalid PowerPC MMA vector type");
4904 }
4905 bool CheckVectorArgs = false;
4906 while (!CheckVectorArgs) {
4907 switch (*Str++) {
4908 case '*':
4909 Type = Context.getPointerType(T: Type);
4910 break;
4911 case 'C':
4912 Type = Type.withConst();
4913 break;
4914 default:
4915 CheckVectorArgs = true;
4916 --Str;
4917 break;
4918 }
4919 }
4920 return Type;
4921 }
4922 default:
4923 return Context.DecodeTypeStr(Str&: --Str, Context, Error, RequireICE, AllowTypeModifiers: true);
4924 }
4925}
4926
4927static bool isPPC_64Builtin(unsigned BuiltinID) {
4928 // These builtins only work on 64-bit PPC targets.
4929 switch (BuiltinID) {
4930 case PPC::BI__builtin_divde:
4931 case PPC::BI__builtin_divdeu:
4932 case PPC::BI__builtin_bpermd:
4933 case PPC::BI__builtin_pdepd:
4934 case PPC::BI__builtin_pextd:
4935 case PPC::BI__builtin_ppc_ldarx:
4936 case PPC::BI__builtin_ppc_stdcx:
4937 case PPC::BI__builtin_ppc_tdw:
4938 case PPC::BI__builtin_ppc_trapd:
4939 case PPC::BI__builtin_ppc_cmpeqb:
4940 case PPC::BI__builtin_ppc_setb:
4941 case PPC::BI__builtin_ppc_mulhd:
4942 case PPC::BI__builtin_ppc_mulhdu:
4943 case PPC::BI__builtin_ppc_maddhd:
4944 case PPC::BI__builtin_ppc_maddhdu:
4945 case PPC::BI__builtin_ppc_maddld:
4946 case PPC::BI__builtin_ppc_load8r:
4947 case PPC::BI__builtin_ppc_store8r:
4948 case PPC::BI__builtin_ppc_insert_exp:
4949 case PPC::BI__builtin_ppc_extract_sig:
4950 case PPC::BI__builtin_ppc_addex:
4951 case PPC::BI__builtin_darn:
4952 case PPC::BI__builtin_darn_raw:
4953 case PPC::BI__builtin_ppc_compare_and_swaplp:
4954 case PPC::BI__builtin_ppc_fetch_and_addlp:
4955 case PPC::BI__builtin_ppc_fetch_and_andlp:
4956 case PPC::BI__builtin_ppc_fetch_and_orlp:
4957 case PPC::BI__builtin_ppc_fetch_and_swaplp:
4958 return true;
4959 }
4960 return false;
4961}
4962
4963 /// Checks that the argument consists of one contiguous run of 1s with any
4964 /// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
4965 /// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs; 0x0F0F0000 is not.
4966 /// Returns true (after emitting a diagnostic) if the value is not such a run.
4967bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
4968 llvm::APSInt Result;
4969 // We can't check the value of a dependent argument.
4970 Expr *Arg = TheCall->getArg(Arg: ArgNum);
4971 if (Arg->isTypeDependent() || Arg->isValueDependent())
4972 return false;
4973
4974 // Check constant-ness first.
4975 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
4976 return true;
4977
4978 // Check for a contiguous run of 1s; 0xFF0000FF is also a run of 1s.
4979 if (Result.isShiftedMask() || (~Result).isShiftedMask())
4980 return false;
4981
4982 return Diag(TheCall->getBeginLoc(),
4983 diag::err_argument_not_contiguous_bit_field)
4984 << ArgNum << Arg->getSourceRange();
4985}
4986
4987bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
4988 CallExpr *TheCall) {
4989 unsigned i = 0, l = 0, u = 0;
4990 bool IsTarget64Bit = TI.getTypeWidth(T: TI.getIntPtrType()) == 64;
4991 llvm::APSInt Result;
4992
4993 if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
4994 return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
4995 << TheCall->getSourceRange();
4996
4997 switch (BuiltinID) {
4998 default: return false;
4999 case PPC::BI__builtin_altivec_crypto_vshasigmaw:
5000 case PPC::BI__builtin_altivec_crypto_vshasigmad:
5001 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1) ||
5002 SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 15);
5003 case PPC::BI__builtin_altivec_dss:
5004 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 3);
5005 case PPC::BI__builtin_tbegin:
5006 case PPC::BI__builtin_tend:
5007 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1);
5008 case PPC::BI__builtin_tsr:
5009 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 7);
5010 case PPC::BI__builtin_tabortwc:
5011 case PPC::BI__builtin_tabortdc:
5012 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);
5013 case PPC::BI__builtin_tabortwci:
5014 case PPC::BI__builtin_tabortdci:
5015 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31) ||
5016 SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 31);
5017 // According to GCC 'Basic PowerPC Built-in Functions Available on ISA 2.05',
5018 // __builtin_(un)pack_longdouble are available only if long double uses IBM
5019 // extended double representation.
5020 case PPC::BI__builtin_unpack_longdouble:
5021 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1))
5022 return true;
5023 [[fallthrough]];
5024 case PPC::BI__builtin_pack_longdouble:
5025 if (&TI.getLongDoubleFormat() != &llvm::APFloat::PPCDoubleDouble())
5026 return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_requires_abi)
5027 << "ibmlongdouble";
5028 return false;
5029 case PPC::BI__builtin_altivec_dst:
5030 case PPC::BI__builtin_altivec_dstt:
5031 case PPC::BI__builtin_altivec_dstst:
5032 case PPC::BI__builtin_altivec_dststt:
5033 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3);
5034 case PPC::BI__builtin_vsx_xxpermdi:
5035 case PPC::BI__builtin_vsx_xxsldwi:
5036 return SemaBuiltinVSX(TheCall);
5037 case PPC::BI__builtin_unpack_vector_int128:
5038 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
5039 case PPC::BI__builtin_altivec_vgnb:
5040 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 2, High: 7);
5041 case PPC::BI__builtin_vsx_xxeval:
5042 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 255);
5043 case PPC::BI__builtin_altivec_vsldbi:
5044 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
5045 case PPC::BI__builtin_altivec_vsrdbi:
5046 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 7);
5047 case PPC::BI__builtin_vsx_xxpermx:
5048 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 3, Low: 0, High: 7);
5049 case PPC::BI__builtin_ppc_tw:
5050 case PPC::BI__builtin_ppc_tdw:
5051 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 1, High: 31);
5052 case PPC::BI__builtin_ppc_cmprb:
5053 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 1);
5054 // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
5055 // be a constant that represents a contiguous bit field.
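  // e.g. a mask of 0x0FF0 or the wrapped 0xFF0000FF is accepted, while 0x0F0F
  // is rejected by SemaValueIsRunOfOnes.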
5056 case PPC::BI__builtin_ppc_rlwnm:
5057 return SemaValueIsRunOfOnes(TheCall, ArgNum: 2);
5058 case PPC::BI__builtin_ppc_rlwimi:
5059 case PPC::BI__builtin_ppc_rldimi:
5060 return SemaBuiltinConstantArg(TheCall, ArgNum: 2, Result) ||
5061 SemaValueIsRunOfOnes(TheCall, ArgNum: 3);
5062 case PPC::BI__builtin_ppc_addex: {
5063 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: 2, Low: 0, High: 3))
5064 return true;
5065 // Output warning for reserved values 1 to 3.
5066 int ArgValue =
5067 TheCall->getArg(Arg: 2)->getIntegerConstantExpr(Ctx: Context)->getSExtValue();
5068 if (ArgValue != 0)
5069 Diag(TheCall->getBeginLoc(), diag::warn_argument_undefined_behaviour)
5070 << ArgValue;
5071 return false;
5072 }
5073 case PPC::BI__builtin_ppc_mtfsb0:
5074 case PPC::BI__builtin_ppc_mtfsb1:
5075 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 31);
5076 case PPC::BI__builtin_ppc_mtfsf:
5077 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 255);
5078 case PPC::BI__builtin_ppc_mtfsfi:
5079 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 0, Low: 0, High: 7) ||
5080 SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
5081 case PPC::BI__builtin_ppc_alignx:
5082 return SemaBuiltinConstantArgPower2(TheCall, ArgNum: 0);
5083 case PPC::BI__builtin_ppc_rdlam:
5084 return SemaValueIsRunOfOnes(TheCall, ArgNum: 2);
5085 case PPC::BI__builtin_vsx_ldrmb:
5086 case PPC::BI__builtin_vsx_strmb:
5087 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 1, High: 16);
5088 case PPC::BI__builtin_altivec_vcntmbb:
5089 case PPC::BI__builtin_altivec_vcntmbh:
5090 case PPC::BI__builtin_altivec_vcntmbw:
5091 case PPC::BI__builtin_altivec_vcntmbd:
5092 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 1);
5093 case PPC::BI__builtin_vsx_xxgenpcvbm:
5094 case PPC::BI__builtin_vsx_xxgenpcvhm:
5095 case PPC::BI__builtin_vsx_xxgenpcvwm:
5096 case PPC::BI__builtin_vsx_xxgenpcvdm:
5097 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 3);
5098 case PPC::BI__builtin_ppc_test_data_class: {
5099 // Check if the first argument of the __builtin_ppc_test_data_class call is
5100 // valid. The argument must be 'float' or 'double' or '__float128'.
5101 QualType ArgType = TheCall->getArg(Arg: 0)->getType();
5102 if (ArgType != QualType(Context.FloatTy) &&
5103 ArgType != QualType(Context.DoubleTy) &&
5104 ArgType != QualType(Context.Float128Ty))
5105 return Diag(TheCall->getBeginLoc(),
5106 diag::err_ppc_invalid_test_data_class_type);
5107 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 127);
5108 }
5109 case PPC::BI__builtin_ppc_maxfe:
5110 case PPC::BI__builtin_ppc_minfe:
5111 case PPC::BI__builtin_ppc_maxfl:
5112 case PPC::BI__builtin_ppc_minfl:
5113 case PPC::BI__builtin_ppc_maxfs:
5114 case PPC::BI__builtin_ppc_minfs: {
5115 if (Context.getTargetInfo().getTriple().isOSAIX() &&
5116 (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
5117 BuiltinID == PPC::BI__builtin_ppc_minfe))
5118 return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
5119 << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
5120 << false << Context.getTargetInfo().getTriple().str();
5121 // Argument type should be exact.
5122 QualType ArgType = QualType(Context.LongDoubleTy);
5123 if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
5124 BuiltinID == PPC::BI__builtin_ppc_minfl)
5125 ArgType = QualType(Context.DoubleTy);
5126 else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
5127 BuiltinID == PPC::BI__builtin_ppc_minfs)
5128 ArgType = QualType(Context.FloatTy);
5129 for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
5130 if (TheCall->getArg(I)->getType() != ArgType)
5131 return Diag(TheCall->getBeginLoc(),
5132 diag::err_typecheck_convert_incompatible)
5133 << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
5134 return false;
5135 }
5136#define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
5137 case PPC::BI__builtin_##Name: \
5138 return SemaBuiltinPPCMMACall(TheCall, BuiltinID, Types);
5139#include "clang/Basic/BuiltinsPPC.def"
5140 }
5141 return SemaBuiltinConstantArgRange(TheCall, ArgNum: i, Low: l, High: u);
5142}
5143
5144// Check if the given type is a non-pointer PPC MMA type. This function is used
5145// in Sema to prevent invalid uses of restricted PPC MMA types.
5146bool Sema::CheckPPCMMAType(QualType Type, SourceLocation TypeLoc) {
5147 if (Type->isPointerType() || Type->isArrayType())
5148 return false;
5149
5150 QualType CoreType = Type.getCanonicalType().getUnqualifiedType();
5151#define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty
5152 if (false
5153#include "clang/Basic/PPCTypes.def"
5154 ) {
5155 Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type);
5156 return true;
5157 }
5158 return false;
5159}
5160
5161bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
5162 CallExpr *TheCall) {
5163 // Position of the memory order and scope arguments in the builtin.
5164 unsigned OrderIndex, ScopeIndex;
5165 switch (BuiltinID) {
5166 case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
5167 case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
5168 case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
5169 case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
5170 OrderIndex = 2;
5171 ScopeIndex = 3;
5172 break;
5173 case AMDGPU::BI__builtin_amdgcn_fence:
5174 OrderIndex = 0;
5175 ScopeIndex = 1;
5176 break;
5177 default:
5178 return false;
5179 }
5180
5181 ExprResult Arg = TheCall->getArg(Arg: OrderIndex);
5182 auto ArgExpr = Arg.get();
5183 Expr::EvalResult ArgResult;
5184
5185 if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
5186 return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
5187 << ArgExpr->getType();
5188 auto Ord = ArgResult.Val.getInt().getZExtValue();
5189
5190 // Check validity of memory ordering as per the C11 / C++11 memory model.
5191 // Only fence needs check. Atomic dec/inc allow all memory orders.
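  // e.g. __ATOMIC_SEQ_CST is always accepted, while passing __ATOMIC_RELAXED
  // or __ATOMIC_CONSUME to __builtin_amdgcn_fence is diagnosed below.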
5192 if (!llvm::isValidAtomicOrderingCABI(Ord))
5193 return Diag(ArgExpr->getBeginLoc(),
5194 diag::warn_atomic_op_has_invalid_memory_order)
5195 << 0 << ArgExpr->getSourceRange();
5196 switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
5197 case llvm::AtomicOrderingCABI::relaxed:
5198 case llvm::AtomicOrderingCABI::consume:
5199 if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
5200 return Diag(ArgExpr->getBeginLoc(),
5201 diag::warn_atomic_op_has_invalid_memory_order)
5202 << 0 << ArgExpr->getSourceRange();
5203 break;
5204 case llvm::AtomicOrderingCABI::acquire:
5205 case llvm::AtomicOrderingCABI::release:
5206 case llvm::AtomicOrderingCABI::acq_rel:
5207 case llvm::AtomicOrderingCABI::seq_cst:
5208 break;
5209 }
5210
5211 Arg = TheCall->getArg(Arg: ScopeIndex);
5212 ArgExpr = Arg.get();
5213 Expr::EvalResult ArgResult1;
5214 // Check that the sync scope argument is a constant expression.
5215 if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, Context))
5216 return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
5217 << ArgExpr->getType();
5218
5219 return false;
5220}
5221
5222bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
5223 llvm::APSInt Result;
5224
5225 // We can't check the value of a dependent argument.
5226 Expr *Arg = TheCall->getArg(Arg: ArgNum);
5227 if (Arg->isTypeDependent() || Arg->isValueDependent())
5228 return false;
5229
5230 // Check constant-ness first.
5231 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
5232 return true;
5233
5234 int64_t Val = Result.getSExtValue();
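  // Encodings 0-3 select integer LMULs 1/2/4/8 and 5-7 select the fractional
  // LMULs; 4 is reserved (mapping assumed from the RVV vtype.vlmul encoding).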
5235 if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
5236 return false;
5237
5238 return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
5239 << Arg->getSourceRange();
5240}
5241
5242static bool CheckInvalidVLENandLMUL(const TargetInfo &TI, CallExpr *TheCall,
5243 Sema &S, QualType Type, int EGW) {
5244 assert((EGW == 128 || EGW == 256) && "EGW can only be 128 or 256 bits");
5245
5246 // LMUL * VLEN >= EGW
5247 ASTContext::BuiltinVectorTypeInfo Info =
5248 S.Context.getBuiltinVectorTypeInfo(VecTy: Type->castAs<BuiltinType>());
5249 unsigned ElemSize = S.Context.getTypeSize(Info.ElementType);
5250 unsigned MinElemCount = Info.EC.getKnownMinValue();
5251
5252 unsigned EGS = EGW / ElemSize;
5253 // If EGS is less than or equal to the minimum number of elements, then the
5254 // type is valid.
5255 if (EGS <= MinElemCount)
5256 return false;
5257
5258 // Otherwise, we need vscale to be at least EGS / MinElemCount.
5259 assert(EGS % MinElemCount == 0);
5260 unsigned VScaleFactor = EGS / MinElemCount;
5261 // Vscale is VLEN/RVVBitsPerBlock.
5262 unsigned MinRequiredVLEN = VScaleFactor * llvm::RISCV::RVVBitsPerBlock;
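  // Worked example (assuming vuint32m1_t lowers to <vscale x 2 x i32>): with
  // EGW = 128 and 32-bit elements, EGS = 4, VScaleFactor = 4 / 2 = 2, so the
  // minimum VLEN is 2 * 64 = 128 and the required extension is "zvl128b".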
5263 std::string RequiredExt = "zvl" + std::to_string(val: MinRequiredVLEN) + "b";
5264 if (!TI.hasFeature(RequiredExt))
5265 return S.Diag(TheCall->getBeginLoc(),
5266 diag::err_riscv_type_requires_extension) << Type << RequiredExt;
5267
5268 return false;
5269}
5270
5271bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
5272 unsigned BuiltinID,
5273 CallExpr *TheCall) {
5274 // CodeGenFunction can also detect this, but this gives a better error
5275 // message.
5276 bool FeatureMissing = false;
5277 SmallVector<StringRef> ReqFeatures;
  StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
  Features.split(ReqFeatures, ',', -1, false);
5280
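  // The required-features string is a comma-separated list of groups; within a
  // group, '|' separates alternative features, any one of which satisfies the
  // requirement (e.g. "zvknha|zvknhb").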
5281 // Check if each required feature is included
5282 for (StringRef F : ReqFeatures) {
5283 SmallVector<StringRef> ReqOpFeatures;
    F.split(ReqOpFeatures, '|');

    if (llvm::none_of(ReqOpFeatures,
                      [&TI](StringRef OF) { return TI.hasFeature(OF); })) {
5288 std::string FeatureStrs;
5289 bool IsExtension = true;
5290 for (StringRef OF : ReqOpFeatures) {
5291 // If the feature is 64bit, alter the string so it will print better in
5292 // the diagnostic.
5293 if (OF == "64bit") {
5294 assert(ReqOpFeatures.size() == 1 && "Expected '64bit' to be alone");
5295 OF = "RV64";
5296 IsExtension = false;
5297 }
5298 if (OF == "32bit") {
5299 assert(ReqOpFeatures.size() == 1 && "Expected '32bit' to be alone");
5300 OF = "RV32";
5301 IsExtension = false;
5302 }
5303
5304 // Convert features like "zbr" and "experimental-zbr" to "Zbr".
        OF.consume_front("experimental-");
        std::string FeatureStr = OF.str();
        FeatureStr[0] = std::toupper(FeatureStr[0]);
5308 // Combine strings.
5309 FeatureStrs += FeatureStrs.empty() ? "" : ", ";
5310 FeatureStrs += "'";
5311 FeatureStrs += FeatureStr;
5312 FeatureStrs += "'";
5313 }
      // Report the missing required extension(s).
      FeatureMissing = true;
      Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
          << IsExtension << TheCall->getSourceRange() << StringRef(FeatureStrs);
5319 }
5320 }
5321
5322 if (FeatureMissing)
5323 return true;
5324
5325 // vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx,
5326 // vsmul.vv, vsmul.vx are not included for EEW=64 in Zve64*.
5327 switch (BuiltinID) {
5328 default:
5329 break;
5330 case RISCVVector::BI__builtin_rvv_vmulhsu_vv:
5331 case RISCVVector::BI__builtin_rvv_vmulhsu_vx:
5332 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tu:
5333 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tu:
5334 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_m:
5335 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_m:
5336 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_mu:
5337 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_mu:
5338 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tum:
5339 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tum:
5340 case RISCVVector::BI__builtin_rvv_vmulhsu_vv_tumu:
5341 case RISCVVector::BI__builtin_rvv_vmulhsu_vx_tumu:
5342 case RISCVVector::BI__builtin_rvv_vmulhu_vv:
5343 case RISCVVector::BI__builtin_rvv_vmulhu_vx:
5344 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tu:
5345 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tu:
5346 case RISCVVector::BI__builtin_rvv_vmulhu_vv_m:
5347 case RISCVVector::BI__builtin_rvv_vmulhu_vx_m:
5348 case RISCVVector::BI__builtin_rvv_vmulhu_vv_mu:
5349 case RISCVVector::BI__builtin_rvv_vmulhu_vx_mu:
5350 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tum:
5351 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tum:
5352 case RISCVVector::BI__builtin_rvv_vmulhu_vv_tumu:
5353 case RISCVVector::BI__builtin_rvv_vmulhu_vx_tumu:
5354 case RISCVVector::BI__builtin_rvv_vmulh_vv:
5355 case RISCVVector::BI__builtin_rvv_vmulh_vx:
5356 case RISCVVector::BI__builtin_rvv_vmulh_vv_tu:
5357 case RISCVVector::BI__builtin_rvv_vmulh_vx_tu:
5358 case RISCVVector::BI__builtin_rvv_vmulh_vv_m:
5359 case RISCVVector::BI__builtin_rvv_vmulh_vx_m:
5360 case RISCVVector::BI__builtin_rvv_vmulh_vv_mu:
5361 case RISCVVector::BI__builtin_rvv_vmulh_vx_mu:
5362 case RISCVVector::BI__builtin_rvv_vmulh_vv_tum:
5363 case RISCVVector::BI__builtin_rvv_vmulh_vx_tum:
5364 case RISCVVector::BI__builtin_rvv_vmulh_vv_tumu:
5365 case RISCVVector::BI__builtin_rvv_vmulh_vx_tumu:
5366 case RISCVVector::BI__builtin_rvv_vsmul_vv:
5367 case RISCVVector::BI__builtin_rvv_vsmul_vx:
5368 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
5369 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
5370 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
5371 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
5372 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
5373 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
5374 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
5375 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
5376 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
5377 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu: {
    ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(
        TheCall->getType()->castAs<BuiltinType>());
5380
5381 if (Context.getTypeSize(Info.ElementType) == 64 && !TI.hasFeature("v"))
5382 return Diag(TheCall->getBeginLoc(),
5383 diag::err_riscv_builtin_requires_extension)
5384 << /* IsExtension */ true << TheCall->getSourceRange() << "v";
5385
5386 break;
5387 }
5388 }
5389
5390 switch (BuiltinID) {
5391 case RISCVVector::BI__builtin_rvv_vsetvli:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
           CheckRISCVLMUL(TheCall, 2);
  case RISCVVector::BI__builtin_rvv_vsetvlimax:
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           CheckRISCVLMUL(TheCall, 1);
5397 case RISCVVector::BI__builtin_rvv_vget_v: {
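    // The index for vget must select a whole destination-sized slice of the
    // source (or a tuple field). Illustrative example: extracting a
    // vint32m2_t from a vint32m8_t permits indices 0..3.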
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex;
    if (VecInfo.NumVectors != 1) // vget for tuple type
      MaxIndex = VecInfo.NumVectors;
    else // vget for non-tuple type
      MaxIndex = (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
                 (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
5411 }
5412 case RISCVVector::BI__builtin_rvv_vset_v: {
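    // Mirror of vget: the index must address a whole value-sized slice of the
    // destination (or a tuple field).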
    ASTContext::BuiltinVectorTypeInfo ResVecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getType().getCanonicalType().getTypePtr()));
    ASTContext::BuiltinVectorTypeInfo VecInfo =
        Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
            TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
    unsigned MaxIndex;
    if (ResVecInfo.NumVectors != 1) // vset for tuple type
      MaxIndex = ResVecInfo.NumVectors;
    else // vset for non-tuple type
      MaxIndex = (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
                 (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
5426 }
5427 // Vector Crypto
5428 case RISCVVector::BI__builtin_rvv_vaeskf1_vi_tu:
5429 case RISCVVector::BI__builtin_rvv_vaeskf2_vi_tu:
5430 case RISCVVector::BI__builtin_rvv_vaeskf2_vi:
5431 case RISCVVector::BI__builtin_rvv_vsm4k_vi_tu: {
    QualType Op1Type = TheCall->getArg(0)->getType();
    QualType Op2Type = TheCall->getArg(1)->getType();
    return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
           CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
5437 }
5438 case RISCVVector::BI__builtin_rvv_vsm3c_vi_tu:
5439 case RISCVVector::BI__builtin_rvv_vsm3c_vi: {
    QualType Op1Type = TheCall->getArg(0)->getType();
    return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 256) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
5443 }
5444 case RISCVVector::BI__builtin_rvv_vaeskf1_vi:
5445 case RISCVVector::BI__builtin_rvv_vsm4k_vi: {
    QualType Op1Type = TheCall->getArg(0)->getType();
    return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5449 }
5450 case RISCVVector::BI__builtin_rvv_vaesdf_vv:
5451 case RISCVVector::BI__builtin_rvv_vaesdf_vs:
5452 case RISCVVector::BI__builtin_rvv_vaesdm_vv:
5453 case RISCVVector::BI__builtin_rvv_vaesdm_vs:
5454 case RISCVVector::BI__builtin_rvv_vaesef_vv:
5455 case RISCVVector::BI__builtin_rvv_vaesef_vs:
5456 case RISCVVector::BI__builtin_rvv_vaesem_vv:
5457 case RISCVVector::BI__builtin_rvv_vaesem_vs:
5458 case RISCVVector::BI__builtin_rvv_vaesz_vs:
5459 case RISCVVector::BI__builtin_rvv_vsm4r_vv:
5460 case RISCVVector::BI__builtin_rvv_vsm4r_vs:
5461 case RISCVVector::BI__builtin_rvv_vaesdf_vv_tu:
5462 case RISCVVector::BI__builtin_rvv_vaesdf_vs_tu:
5463 case RISCVVector::BI__builtin_rvv_vaesdm_vv_tu:
5464 case RISCVVector::BI__builtin_rvv_vaesdm_vs_tu:
5465 case RISCVVector::BI__builtin_rvv_vaesef_vv_tu:
5466 case RISCVVector::BI__builtin_rvv_vaesef_vs_tu:
5467 case RISCVVector::BI__builtin_rvv_vaesem_vv_tu:
5468 case RISCVVector::BI__builtin_rvv_vaesem_vs_tu:
5469 case RISCVVector::BI__builtin_rvv_vaesz_vs_tu:
5470 case RISCVVector::BI__builtin_rvv_vsm4r_vv_tu:
5471 case RISCVVector::BI__builtin_rvv_vsm4r_vs_tu: {
    QualType Op1Type = TheCall->getArg(0)->getType();
    QualType Op2Type = TheCall->getArg(1)->getType();
    return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, 128) ||
           CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, 128);
5476 }
5477 case RISCVVector::BI__builtin_rvv_vsha2ch_vv:
5478 case RISCVVector::BI__builtin_rvv_vsha2cl_vv:
5479 case RISCVVector::BI__builtin_rvv_vsha2ms_vv:
5480 case RISCVVector::BI__builtin_rvv_vsha2ch_vv_tu:
5481 case RISCVVector::BI__builtin_rvv_vsha2cl_vv_tu:
5482 case RISCVVector::BI__builtin_rvv_vsha2ms_vv_tu: {
    QualType Op1Type = TheCall->getArg(0)->getType();
    QualType Op2Type = TheCall->getArg(1)->getType();
    QualType Op3Type = TheCall->getArg(2)->getType();
    ASTContext::BuiltinVectorTypeInfo Info =
        Context.getBuiltinVectorTypeInfo(Op1Type->castAs<BuiltinType>());
    uint64_t ElemSize = Context.getTypeSize(Info.ElementType);
    if (ElemSize == 64 && !TI.hasFeature("zvknhb"))
      return Diag(TheCall->getBeginLoc(),
                  diag::err_riscv_builtin_requires_extension)
             << /* IsExtension */ true << TheCall->getSourceRange() << "zvknhb";

    return CheckInvalidVLENandLMUL(TI, TheCall, *this, Op1Type, ElemSize * 4) ||
           CheckInvalidVLENandLMUL(TI, TheCall, *this, Op2Type, ElemSize * 4) ||
           CheckInvalidVLENandLMUL(TI, TheCall, *this, Op3Type, ElemSize * 4);
5497 }
5498
5499 case RISCVVector::BI__builtin_rvv_sf_vc_i_se:
5500 // bit_27_26, bit_24_20, bit_11_7, simm5, sew, log2lmul
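    // For the SiFive VCIX (sf.vc) builtins, 2-bit opcode fields accept 0..3,
    // 5-bit register/immediate fields accept 0..31, simm5 is a signed 5-bit
    // immediate in [-16, 15], and log2lmul is validated by CheckRISCVLMUL.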
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15) ||
           CheckRISCVLMUL(TheCall, 5);
5506 case RISCVVector::BI__builtin_rvv_sf_vc_iv_se:
5507 // bit_27_26, bit_11_7, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
5511 case RISCVVector::BI__builtin_rvv_sf_vc_v_i:
5512 case RISCVVector::BI__builtin_rvv_sf_vc_v_i_se:
5513 // bit_27_26, bit_24_20, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
5517 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv:
5518 case RISCVVector::BI__builtin_rvv_sf_vc_v_iv_se:
5519 // bit_27_26, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 2, -16, 15);
5522 case RISCVVector::BI__builtin_rvv_sf_vc_ivv_se:
5523 case RISCVVector::BI__builtin_rvv_sf_vc_ivw_se:
5524 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv:
5525 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw:
5526 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivv_se:
5527 case RISCVVector::BI__builtin_rvv_sf_vc_v_ivw_se:
5528 // bit_27_26, vd, vs2, simm5
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 3, -16, 15);
5531 case RISCVVector::BI__builtin_rvv_sf_vc_x_se:
5532 // bit_27_26, bit_24_20, bit_11_7, xs1, sew, log2lmul
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 31) ||
           CheckRISCVLMUL(TheCall, 5);
5537 case RISCVVector::BI__builtin_rvv_sf_vc_xv_se:
5538 case RISCVVector::BI__builtin_rvv_sf_vc_vv_se:
5539 // bit_27_26, bit_11_7, vs2, xs1/vs1
5540 case RISCVVector::BI__builtin_rvv_sf_vc_v_x:
5541 case RISCVVector::BI__builtin_rvv_sf_vc_v_x_se:
5542 // bit_27_26, bit_24-20, xs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5545 case RISCVVector::BI__builtin_rvv_sf_vc_vvv_se:
5546 case RISCVVector::BI__builtin_rvv_sf_vc_xvv_se:
5547 case RISCVVector::BI__builtin_rvv_sf_vc_vvw_se:
5548 case RISCVVector::BI__builtin_rvv_sf_vc_xvw_se:
5549 // bit_27_26, vd, vs2, xs1
5550 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv:
5551 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv:
5552 case RISCVVector::BI__builtin_rvv_sf_vc_v_xv_se:
5553 case RISCVVector::BI__builtin_rvv_sf_vc_v_vv_se:
5554 // bit_27_26, vs2, xs1/vs1
5555 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv:
5556 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv:
5557 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw:
5558 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw:
5559 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvv_se:
5560 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvv_se:
5561 case RISCVVector::BI__builtin_rvv_sf_vc_v_xvw_se:
5562 case RISCVVector::BI__builtin_rvv_sf_vc_v_vvw_se:
5563 // bit_27_26, vd, vs2, xs1/vs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3);
5565 case RISCVVector::BI__builtin_rvv_sf_vc_fv_se:
5566 // bit_26, bit_11_7, vs2, fs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1) ||
           SemaBuiltinConstantArgRange(TheCall, 1, 0, 31);
5569 case RISCVVector::BI__builtin_rvv_sf_vc_fvv_se:
5570 case RISCVVector::BI__builtin_rvv_sf_vc_fvw_se:
5571 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv:
5572 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw:
5573 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvv_se:
5574 case RISCVVector::BI__builtin_rvv_sf_vc_v_fvw_se:
5575 // bit_26, vd, vs2, fs1
5576 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv:
5577 case RISCVVector::BI__builtin_rvv_sf_vc_v_fv_se:
5578 // bit_26, vs2, fs1
    return SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
5580 // Check if byteselect is in [0, 3]
5581 case RISCV::BI__builtin_riscv_aes32dsi:
5582 case RISCV::BI__builtin_riscv_aes32dsmi:
5583 case RISCV::BI__builtin_riscv_aes32esi:
5584 case RISCV::BI__builtin_riscv_aes32esmi:
5585 case RISCV::BI__builtin_riscv_sm4ks:
5586 case RISCV::BI__builtin_riscv_sm4ed:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
5588 // Check if rnum is in [0, 10]
5589 case RISCV::BI__builtin_riscv_aes64ks1i:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
5591 // Check if value range for vxrm is in [0, 3]
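  // vxrm encodings: 0 = rnu, 1 = rne, 2 = rdn, 3 = rod.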
5592 case RISCVVector::BI__builtin_rvv_vaaddu_vv:
5593 case RISCVVector::BI__builtin_rvv_vaaddu_vx:
5594 case RISCVVector::BI__builtin_rvv_vaadd_vv:
5595 case RISCVVector::BI__builtin_rvv_vaadd_vx:
5596 case RISCVVector::BI__builtin_rvv_vasubu_vv:
5597 case RISCVVector::BI__builtin_rvv_vasubu_vx:
5598 case RISCVVector::BI__builtin_rvv_vasub_vv:
5599 case RISCVVector::BI__builtin_rvv_vasub_vx:
5600 case RISCVVector::BI__builtin_rvv_vsmul_vv:
5601 case RISCVVector::BI__builtin_rvv_vsmul_vx:
5602 case RISCVVector::BI__builtin_rvv_vssra_vv:
5603 case RISCVVector::BI__builtin_rvv_vssra_vx:
5604 case RISCVVector::BI__builtin_rvv_vssrl_vv:
5605 case RISCVVector::BI__builtin_rvv_vssrl_vx:
5606 case RISCVVector::BI__builtin_rvv_vnclip_wv:
5607 case RISCVVector::BI__builtin_rvv_vnclip_wx:
5608 case RISCVVector::BI__builtin_rvv_vnclipu_wv:
5609 case RISCVVector::BI__builtin_rvv_vnclipu_wx:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
5611 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tu:
5612 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tu:
5613 case RISCVVector::BI__builtin_rvv_vaadd_vv_tu:
5614 case RISCVVector::BI__builtin_rvv_vaadd_vx_tu:
5615 case RISCVVector::BI__builtin_rvv_vasubu_vv_tu:
5616 case RISCVVector::BI__builtin_rvv_vasubu_vx_tu:
5617 case RISCVVector::BI__builtin_rvv_vasub_vv_tu:
5618 case RISCVVector::BI__builtin_rvv_vasub_vx_tu:
5619 case RISCVVector::BI__builtin_rvv_vsmul_vv_tu:
5620 case RISCVVector::BI__builtin_rvv_vsmul_vx_tu:
5621 case RISCVVector::BI__builtin_rvv_vssra_vv_tu:
5622 case RISCVVector::BI__builtin_rvv_vssra_vx_tu:
5623 case RISCVVector::BI__builtin_rvv_vssrl_vv_tu:
5624 case RISCVVector::BI__builtin_rvv_vssrl_vx_tu:
5625 case RISCVVector::BI__builtin_rvv_vnclip_wv_tu:
5626 case RISCVVector::BI__builtin_rvv_vnclip_wx_tu:
5627 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tu:
5628 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tu:
5629 case RISCVVector::BI__builtin_rvv_vaaddu_vv_m:
5630 case RISCVVector::BI__builtin_rvv_vaaddu_vx_m:
5631 case RISCVVector::BI__builtin_rvv_vaadd_vv_m:
5632 case RISCVVector::BI__builtin_rvv_vaadd_vx_m:
5633 case RISCVVector::BI__builtin_rvv_vasubu_vv_m:
5634 case RISCVVector::BI__builtin_rvv_vasubu_vx_m:
5635 case RISCVVector::BI__builtin_rvv_vasub_vv_m:
5636 case RISCVVector::BI__builtin_rvv_vasub_vx_m:
5637 case RISCVVector::BI__builtin_rvv_vsmul_vv_m:
5638 case RISCVVector::BI__builtin_rvv_vsmul_vx_m:
5639 case RISCVVector::BI__builtin_rvv_vssra_vv_m:
5640 case RISCVVector::BI__builtin_rvv_vssra_vx_m:
5641 case RISCVVector::BI__builtin_rvv_vssrl_vv_m:
5642 case RISCVVector::BI__builtin_rvv_vssrl_vx_m:
5643 case RISCVVector::BI__builtin_rvv_vnclip_wv_m:
5644 case RISCVVector::BI__builtin_rvv_vnclip_wx_m:
5645 case RISCVVector::BI__builtin_rvv_vnclipu_wv_m:
5646 case RISCVVector::BI__builtin_rvv_vnclipu_wx_m:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 3);
5648 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tum:
5649 case RISCVVector::BI__builtin_rvv_vaaddu_vv_tumu:
5650 case RISCVVector::BI__builtin_rvv_vaaddu_vv_mu:
5651 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tum:
5652 case RISCVVector::BI__builtin_rvv_vaaddu_vx_tumu:
5653 case RISCVVector::BI__builtin_rvv_vaaddu_vx_mu:
5654 case RISCVVector::BI__builtin_rvv_vaadd_vv_tum:
5655 case RISCVVector::BI__builtin_rvv_vaadd_vv_tumu:
5656 case RISCVVector::BI__builtin_rvv_vaadd_vv_mu:
5657 case RISCVVector::BI__builtin_rvv_vaadd_vx_tum:
5658 case RISCVVector::BI__builtin_rvv_vaadd_vx_tumu:
5659 case RISCVVector::BI__builtin_rvv_vaadd_vx_mu:
5660 case RISCVVector::BI__builtin_rvv_vasubu_vv_tum:
5661 case RISCVVector::BI__builtin_rvv_vasubu_vv_tumu:
5662 case RISCVVector::BI__builtin_rvv_vasubu_vv_mu:
5663 case RISCVVector::BI__builtin_rvv_vasubu_vx_tum:
5664 case RISCVVector::BI__builtin_rvv_vasubu_vx_tumu:
5665 case RISCVVector::BI__builtin_rvv_vasubu_vx_mu:
5666 case RISCVVector::BI__builtin_rvv_vasub_vv_tum:
5667 case RISCVVector::BI__builtin_rvv_vasub_vv_tumu:
5668 case RISCVVector::BI__builtin_rvv_vasub_vv_mu:
5669 case RISCVVector::BI__builtin_rvv_vasub_vx_tum:
5670 case RISCVVector::BI__builtin_rvv_vasub_vx_tumu:
5671 case RISCVVector::BI__builtin_rvv_vasub_vx_mu:
5672 case RISCVVector::BI__builtin_rvv_vsmul_vv_mu:
5673 case RISCVVector::BI__builtin_rvv_vsmul_vx_mu:
5674 case RISCVVector::BI__builtin_rvv_vssra_vv_mu:
5675 case RISCVVector::BI__builtin_rvv_vssra_vx_mu:
5676 case RISCVVector::BI__builtin_rvv_vssrl_vv_mu:
5677 case RISCVVector::BI__builtin_rvv_vssrl_vx_mu:
5678 case RISCVVector::BI__builtin_rvv_vnclip_wv_mu:
5679 case RISCVVector::BI__builtin_rvv_vnclip_wx_mu:
5680 case RISCVVector::BI__builtin_rvv_vnclipu_wv_mu:
5681 case RISCVVector::BI__builtin_rvv_vnclipu_wx_mu:
5682 case RISCVVector::BI__builtin_rvv_vsmul_vv_tum:
5683 case RISCVVector::BI__builtin_rvv_vsmul_vx_tum:
5684 case RISCVVector::BI__builtin_rvv_vssra_vv_tum:
5685 case RISCVVector::BI__builtin_rvv_vssra_vx_tum:
5686 case RISCVVector::BI__builtin_rvv_vssrl_vv_tum:
5687 case RISCVVector::BI__builtin_rvv_vssrl_vx_tum:
5688 case RISCVVector::BI__builtin_rvv_vnclip_wv_tum:
5689 case RISCVVector::BI__builtin_rvv_vnclip_wx_tum:
5690 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tum:
5691 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tum:
5692 case RISCVVector::BI__builtin_rvv_vsmul_vv_tumu:
5693 case RISCVVector::BI__builtin_rvv_vsmul_vx_tumu:
5694 case RISCVVector::BI__builtin_rvv_vssra_vv_tumu:
5695 case RISCVVector::BI__builtin_rvv_vssra_vx_tumu:
5696 case RISCVVector::BI__builtin_rvv_vssrl_vv_tumu:
5697 case RISCVVector::BI__builtin_rvv_vssrl_vx_tumu:
5698 case RISCVVector::BI__builtin_rvv_vnclip_wv_tumu:
5699 case RISCVVector::BI__builtin_rvv_vnclip_wx_tumu:
5700 case RISCVVector::BI__builtin_rvv_vnclipu_wv_tumu:
5701 case RISCVVector::BI__builtin_rvv_vnclipu_wx_tumu:
    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 3);
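  // The _rm builtins below take an explicit frm operand; valid values are 0..4
  // (rne, rtz, rdn, rup, rmm).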
5703 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm:
5704 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm:
5705 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm:
5706 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm:
5707 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm:
5708 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm:
5709 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm:
5710 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm:
5711 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm:
5712 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm:
5713 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm:
5714 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm:
5715 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 4);
5717 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm:
5718 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm:
5719 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm:
5720 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm:
5721 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm:
5722 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm:
5723 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm:
5724 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm:
5725 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm:
5726 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm:
5727 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm:
5728 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm:
5729 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm:
5730 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm:
5731 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm:
5732 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm:
5733 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm:
5734 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm:
5735 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm:
5736 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm:
5737 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm:
5738 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm:
5739 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm:
5740 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm:
5741 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tu:
5742 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tu:
5743 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tu:
5744 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tu:
5745 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tu:
5746 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tu:
5747 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tu:
5748 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tu:
5749 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tu:
5750 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tu:
5751 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tu:
5752 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tu:
5753 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tu:
5754 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_m:
5755 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_m:
5756 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_m:
5757 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_m:
5758 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_m:
5759 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_m:
5760 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_m:
5761 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_m:
5762 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_m:
5763 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_m:
5764 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_m:
5765 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_m:
5766 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_m:
    return SemaBuiltinConstantArgRange(TheCall, 2, 0, 4);
5768 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tu:
5769 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tu:
5770 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tu:
5771 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tu:
5772 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tu:
5773 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tu:
5774 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tu:
5775 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tu:
5776 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tu:
5777 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tu:
5778 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tu:
5779 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tu:
5780 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tu:
5781 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tu:
5782 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tu:
5783 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tu:
5784 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tu:
5785 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tu:
5786 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tu:
5787 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tu:
5788 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tu:
5789 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tu:
5790 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tu:
5791 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tu:
5792 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm:
5793 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm:
5794 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm:
5795 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm:
5796 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm:
5797 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm:
5798 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm:
5799 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm:
5800 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm:
5801 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm:
5802 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm:
5803 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm:
5804 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm:
5805 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm:
5806 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm:
5807 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm:
5808 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm:
5809 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm:
5810 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm:
5811 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm:
5812 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm:
5813 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm:
5814 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm:
5815 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm:
5816 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tu:
5817 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tu:
5818 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tu:
5819 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tu:
5820 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tu:
5821 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tu:
5822 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tu:
5823 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tu:
5824 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tu:
5825 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tu:
5826 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tu:
5827 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tu:
5828 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tu:
5829 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tu:
5830 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tu:
5831 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tu:
5832 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tu:
5833 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tu:
5834 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tu:
5835 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tu:
5836 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tu:
5837 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tu:
5838 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tu:
5839 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tu:
5840 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_m:
5841 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_m:
5842 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_m:
5843 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_m:
5844 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_m:
5845 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_m:
5846 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_m:
5847 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_m:
5848 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_m:
5849 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_m:
5850 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_m:
5851 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_m:
5852 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_m:
5853 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_m:
5854 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_m:
5855 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_m:
5856 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_m:
5857 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_m:
5858 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_m:
5859 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_m:
5860 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_m:
5861 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_m:
5862 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_m:
5863 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_m:
5864 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tum:
5865 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tum:
5866 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tum:
5867 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tum:
5868 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tum:
5869 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tum:
5870 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tum:
5871 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tum:
5872 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tum:
5873 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tum:
5874 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tum:
5875 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tum:
5876 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tum:
5877 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_tumu:
5878 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_tumu:
5879 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_tumu:
5880 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_tumu:
5881 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_tumu:
5882 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_tumu:
5883 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_tumu:
5884 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_tumu:
5885 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_tumu:
5886 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_tumu:
5887 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_tumu:
5888 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_tumu:
5889 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_tumu:
5890 case RISCVVector::BI__builtin_rvv_vfsqrt_v_rm_mu:
5891 case RISCVVector::BI__builtin_rvv_vfrec7_v_rm_mu:
5892 case RISCVVector::BI__builtin_rvv_vfcvt_x_f_v_rm_mu:
5893 case RISCVVector::BI__builtin_rvv_vfcvt_xu_f_v_rm_mu:
5894 case RISCVVector::BI__builtin_rvv_vfcvt_f_x_v_rm_mu:
5895 case RISCVVector::BI__builtin_rvv_vfcvt_f_xu_v_rm_mu:
5896 case RISCVVector::BI__builtin_rvv_vfwcvt_x_f_v_rm_mu:
5897 case RISCVVector::BI__builtin_rvv_vfwcvt_xu_f_v_rm_mu:
5898 case RISCVVector::BI__builtin_rvv_vfncvt_x_f_w_rm_mu:
5899 case RISCVVector::BI__builtin_rvv_vfncvt_xu_f_w_rm_mu:
5900 case RISCVVector::BI__builtin_rvv_vfncvt_f_x_w_rm_mu:
5901 case RISCVVector::BI__builtin_rvv_vfncvt_f_xu_w_rm_mu:
5902 case RISCVVector::BI__builtin_rvv_vfncvt_f_f_w_rm_mu:
    return SemaBuiltinConstantArgRange(TheCall, 3, 0, 4);
5904 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_m:
5905 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_m:
5906 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_m:
5907 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_m:
5908 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_m:
5909 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_m:
5910 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_m:
5911 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_m:
5912 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_m:
5913 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_m:
5914 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_m:
5915 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_m:
5916 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_m:
5917 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_m:
5918 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_m:
5919 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_m:
5920 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_m:
5921 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_m:
5922 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_m:
5923 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_m:
5924 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_m:
5925 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_m:
5926 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_m:
5927 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_m:
5928 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tum:
5929 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tum:
5930 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tum:
5931 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tum:
5932 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tum:
5933 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tum:
5934 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tum:
5935 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tum:
5936 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tum:
5937 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tum:
5938 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tum:
5939 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tum:
5940 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tum:
5941 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tum:
5942 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tum:
5943 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tum:
5944 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tum:
5945 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tum:
5946 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tum:
5947 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tum:
5948 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tum:
5949 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tum:
5950 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tum:
5951 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tum:
5952 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tum:
5953 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tum:
5954 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tum:
5955 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tum:
5956 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tum:
5957 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tum:
5958 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tum:
5959 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tum:
5960 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tum:
5961 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tum:
5962 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tum:
5963 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tum:
5964 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tum:
5965 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tum:
5966 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tum:
5967 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tum:
5968 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tum:
5969 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tum:
5970 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tum:
5971 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tum:
5972 case RISCVVector::BI__builtin_rvv_vfredosum_vs_rm_tum:
5973 case RISCVVector::BI__builtin_rvv_vfredusum_vs_rm_tum:
5974 case RISCVVector::BI__builtin_rvv_vfwredosum_vs_rm_tum:
5975 case RISCVVector::BI__builtin_rvv_vfwredusum_vs_rm_tum:
5976 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_tumu:
5977 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_tumu:
5978 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_tumu:
5979 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_tumu:
5980 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_tumu:
5981 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_tumu:
5982 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_tumu:
5983 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_tumu:
5984 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_tumu:
5985 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_tumu:
5986 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_tumu:
5987 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_tumu:
5988 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_tumu:
5989 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_tumu:
5990 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_tumu:
5991 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_tumu:
5992 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_tumu:
5993 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_tumu:
5994 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_tumu:
5995 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_tumu:
5996 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_tumu:
5997 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_tumu:
5998 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_tumu:
5999 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_tumu:
6000 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_tumu:
6001 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_tumu:
6002 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_tumu:
6003 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_tumu:
6004 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_tumu:
6005 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_tumu:
6006 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_tumu:
6007 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_tumu:
6008 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_tumu:
6009 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_tumu:
6010 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_tumu:
6011 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_tumu:
6012 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_tumu:
6013 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_tumu:
6014 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_tumu:
6015 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_tumu:
6016 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_tumu:
6017 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_tumu:
6018 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_tumu:
6019 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_tumu:
6020 case RISCVVector::BI__builtin_rvv_vfadd_vv_rm_mu:
6021 case RISCVVector::BI__builtin_rvv_vfadd_vf_rm_mu:
6022 case RISCVVector::BI__builtin_rvv_vfsub_vv_rm_mu:
6023 case RISCVVector::BI__builtin_rvv_vfsub_vf_rm_mu:
6024 case RISCVVector::BI__builtin_rvv_vfrsub_vf_rm_mu:
6025 case RISCVVector::BI__builtin_rvv_vfwadd_vv_rm_mu:
6026 case RISCVVector::BI__builtin_rvv_vfwadd_vf_rm_mu:
6027 case RISCVVector::BI__builtin_rvv_vfwsub_vv_rm_mu:
6028 case RISCVVector::BI__builtin_rvv_vfwsub_vf_rm_mu:
6029 case RISCVVector::BI__builtin_rvv_vfwadd_wv_rm_mu:
6030 case RISCVVector::BI__builtin_rvv_vfwadd_wf_rm_mu:
6031 case RISCVVector::BI__builtin_rvv_vfwsub_wv_rm_mu:
6032 case RISCVVector::BI__builtin_rvv_vfwsub_wf_rm_mu:
6033 case RISCVVector::BI__builtin_rvv_vfmul_vv_rm_mu:
6034 case RISCVVector::BI__builtin_rvv_vfmul_vf_rm_mu:
6035 case RISCVVector::BI__builtin_rvv_vfdiv_vv_rm_mu:
6036 case RISCVVector::BI__builtin_rvv_vfdiv_vf_rm_mu:
6037 case RISCVVector::BI__builtin_rvv_vfrdiv_vf_rm_mu:
6038 case RISCVVector::BI__builtin_rvv_vfwmul_vv_rm_mu:
6039 case RISCVVector::BI__builtin_rvv_vfwmul_vf_rm_mu:
6040 case RISCVVector::BI__builtin_rvv_vfmacc_vv_rm_mu:
6041 case RISCVVector::BI__builtin_rvv_vfmacc_vf_rm_mu:
6042 case RISCVVector::BI__builtin_rvv_vfnmacc_vv_rm_mu:
6043 case RISCVVector::BI__builtin_rvv_vfnmacc_vf_rm_mu:
6044 case RISCVVector::BI__builtin_rvv_vfmsac_vv_rm_mu:
6045 case RISCVVector::BI__builtin_rvv_vfmsac_vf_rm_mu:
6046 case RISCVVector::BI__builtin_rvv_vfnmsac_vv_rm_mu:
6047 case RISCVVector::BI__builtin_rvv_vfnmsac_vf_rm_mu:
6048 case RISCVVector::BI__builtin_rvv_vfmadd_vv_rm_mu:
6049 case RISCVVector::BI__builtin_rvv_vfmadd_vf_rm_mu:
6050 case RISCVVector::BI__builtin_rvv_vfnmadd_vv_rm_mu:
6051 case RISCVVector::BI__builtin_rvv_vfnmadd_vf_rm_mu:
6052 case RISCVVector::BI__builtin_rvv_vfmsub_vv_rm_mu:
6053 case RISCVVector::BI__builtin_rvv_vfmsub_vf_rm_mu:
6054 case RISCVVector::BI__builtin_rvv_vfnmsub_vv_rm_mu:
6055 case RISCVVector::BI__builtin_rvv_vfnmsub_vf_rm_mu:
6056 case RISCVVector::BI__builtin_rvv_vfwmacc_vv_rm_mu:
6057 case RISCVVector::BI__builtin_rvv_vfwmacc_vf_rm_mu:
6058 case RISCVVector::BI__builtin_rvv_vfwnmacc_vv_rm_mu:
6059 case RISCVVector::BI__builtin_rvv_vfwnmacc_vf_rm_mu:
6060 case RISCVVector::BI__builtin_rvv_vfwmsac_vv_rm_mu:
6061 case RISCVVector::BI__builtin_rvv_vfwmsac_vf_rm_mu:
6062 case RISCVVector::BI__builtin_rvv_vfwnmsac_vv_rm_mu:
6063 case RISCVVector::BI__builtin_rvv_vfwnmsac_vf_rm_mu:
    return SemaBuiltinConstantArgRange(TheCall, 4, 0, 4);
6065 case RISCV::BI__builtin_riscv_ntl_load:
6066 case RISCV::BI__builtin_riscv_ntl_store:
    DeclRefExpr *DRE =
        cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
6069 assert((BuiltinID == RISCV::BI__builtin_riscv_ntl_store ||
6070 BuiltinID == RISCV::BI__builtin_riscv_ntl_load) &&
6071 "Unexpected RISC-V nontemporal load/store builtin!");
6072 bool IsStore = BuiltinID == RISCV::BI__builtin_riscv_ntl_store;
6073 unsigned NumArgs = IsStore ? 3 : 2;
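    // __builtin_riscv_ntl_load(ptr[, domain]) takes at most two arguments and
    // __builtin_riscv_ntl_store(ptr, value[, domain]) at most three; the
    // trailing domain operand is optional in both forms.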
6074
    if (checkArgCountAtLeast(*this, TheCall, NumArgs - 1))
      return true;

    if (checkArgCountAtMost(*this, TheCall, NumArgs))
6079 return true;
6080
6081 // Domain value should be compile-time constant.
6082 // 2 <= domain <= 5
    if (TheCall->getNumArgs() == NumArgs &&
        SemaBuiltinConstantArgRange(TheCall, NumArgs - 1, 2, 5))
6085 return true;
6086
    Expr *PointerArg = TheCall->getArg(0);
    ExprResult PointerArgResult =
        DefaultFunctionArrayLvalueConversion(PointerArg);
6090
6091 if (PointerArgResult.isInvalid())
6092 return true;
6093 PointerArg = PointerArgResult.get();
6094
6095 const PointerType *PtrType = PointerArg->getType()->getAs<PointerType>();
6096 if (!PtrType) {
6097 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
6098 << PointerArg->getType() << PointerArg->getSourceRange();
6099 return true;
6100 }
6101
6102 QualType ValType = PtrType->getPointeeType();
6103 ValType = ValType.getUnqualifiedType();
6104 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
6105 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
6106 !ValType->isVectorType() && !ValType->isRVVSizelessBuiltinType()) {
6107 Diag(DRE->getBeginLoc(),
6108 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
6109 << PointerArg->getType() << PointerArg->getSourceRange();
6110 return true;
6111 }
6112
6113 if (!IsStore) {
6114 TheCall->setType(ValType);
6115 return false;
6116 }
6117
    ExprResult ValArg = TheCall->getArg(1);
    InitializedEntity Entity = InitializedEntity::InitializeParameter(
        Context, ValType, /*consume*/ false);
    ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg);
6122 if (ValArg.isInvalid())
6123 return true;
6124
    TheCall->setArg(1, ValArg.get());
6126 TheCall->setType(Context.VoidTy);
6127 return false;
6128 }
6129
6130 return false;
6131}
6132
6133bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
6134 CallExpr *TheCall) {
6135 if (BuiltinID == SystemZ::BI__builtin_tabort) {
    Expr *Arg = TheCall->getArg(0);
6137 if (std::optional<llvm::APSInt> AbortCode =
6138 Arg->getIntegerConstantExpr(Context))
6139 if (AbortCode->getSExtValue() >= 0 && AbortCode->getSExtValue() < 256)
6140 return Diag(Arg->getBeginLoc(), diag::err_systemz_invalid_tabort_code)
6141 << Arg->getSourceRange();
6142 }
6143
6144 // For intrinsics which take an immediate value as part of the instruction,
6145 // range check them here.
6146 unsigned i = 0, l = 0, u = 0;
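  // i is the index of the immediate argument, and [l, u] is its valid range.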
6147 switch (BuiltinID) {
6148 default: return false;
6149 case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break;
6150 case SystemZ::BI__builtin_s390_verimb:
6151 case SystemZ::BI__builtin_s390_verimh:
6152 case SystemZ::BI__builtin_s390_verimf:
6153 case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break;
6154 case SystemZ::BI__builtin_s390_vfaeb:
6155 case SystemZ::BI__builtin_s390_vfaeh:
6156 case SystemZ::BI__builtin_s390_vfaef:
6157 case SystemZ::BI__builtin_s390_vfaebs:
6158 case SystemZ::BI__builtin_s390_vfaehs:
6159 case SystemZ::BI__builtin_s390_vfaefs:
6160 case SystemZ::BI__builtin_s390_vfaezb:
6161 case SystemZ::BI__builtin_s390_vfaezh:
6162 case SystemZ::BI__builtin_s390_vfaezf:
6163 case SystemZ::BI__builtin_s390_vfaezbs:
6164 case SystemZ::BI__builtin_s390_vfaezhs:
6165 case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
6166 case SystemZ::BI__builtin_s390_vfisb:
6167 case SystemZ::BI__builtin_s390_vfidb:
    return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
           SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
6170 case SystemZ::BI__builtin_s390_vftcisb:
6171 case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
6172 case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
6173 case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
6174 case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break;
6175 case SystemZ::BI__builtin_s390_vstrcb:
6176 case SystemZ::BI__builtin_s390_vstrch:
6177 case SystemZ::BI__builtin_s390_vstrcf:
6178 case SystemZ::BI__builtin_s390_vstrczb:
6179 case SystemZ::BI__builtin_s390_vstrczh:
6180 case SystemZ::BI__builtin_s390_vstrczf:
6181 case SystemZ::BI__builtin_s390_vstrcbs:
6182 case SystemZ::BI__builtin_s390_vstrchs:
6183 case SystemZ::BI__builtin_s390_vstrcfs:
6184 case SystemZ::BI__builtin_s390_vstrczbs:
6185 case SystemZ::BI__builtin_s390_vstrczhs:
6186 case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
6187 case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
6188 case SystemZ::BI__builtin_s390_vfminsb:
6189 case SystemZ::BI__builtin_s390_vfmaxsb:
6190 case SystemZ::BI__builtin_s390_vfmindb:
6191 case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
6192 case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
6193 case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
6194 case SystemZ::BI__builtin_s390_vclfnhs:
6195 case SystemZ::BI__builtin_s390_vclfnls:
6196 case SystemZ::BI__builtin_s390_vcfn:
6197 case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
6198 case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
6199 }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
6201}
6202
6203bool Sema::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
6204 unsigned BuiltinID,
6205 CallExpr *TheCall) {
6206 switch (BuiltinID) {
6207 case WebAssembly::BI__builtin_wasm_ref_null_extern:
6208 return BuiltinWasmRefNullExtern(TheCall);
6209 case WebAssembly::BI__builtin_wasm_ref_null_func:
6210 return BuiltinWasmRefNullFunc(TheCall);
6211 case WebAssembly::BI__builtin_wasm_table_get:
6212 return BuiltinWasmTableGet(TheCall);
6213 case WebAssembly::BI__builtin_wasm_table_set:
6214 return BuiltinWasmTableSet(TheCall);
6215 case WebAssembly::BI__builtin_wasm_table_size:
6216 return BuiltinWasmTableSize(TheCall);
6217 case WebAssembly::BI__builtin_wasm_table_grow:
6218 return BuiltinWasmTableGrow(TheCall);
6219 case WebAssembly::BI__builtin_wasm_table_fill:
6220 return BuiltinWasmTableFill(TheCall);
6221 case WebAssembly::BI__builtin_wasm_table_copy:
6222 return BuiltinWasmTableCopy(TheCall);
6223 }
6224
6225 return false;
6226}
6227
6228void Sema::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D) {
6229 const TargetInfo &TI = Context.getTargetInfo();
6230
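  // Diagnose RVV types that need an extension the target does not provide.
  // Illustrative examples: vint64m1_t (64-bit integer elements) requires
  // zve64x; vfloat16m1_t requires zvfh or zvfhmin.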
6231 ASTContext::BuiltinVectorTypeInfo Info =
      Context.getBuiltinVectorTypeInfo(Ty->castAs<BuiltinType>());
6233 unsigned EltSize = Context.getTypeSize(Info.ElementType);
6234 unsigned MinElts = Info.EC.getKnownMinValue();
6235
  // The (ELEN, LMUL) pairs (8, mf8), (16, mf4), (32, mf2), and (64, m1)
  // require at least zve64x.
6238 if (((EltSize == 64 && Info.ElementType->isIntegerType()) || MinElts == 1) &&
6239 !TI.hasFeature("zve64x"))
6240 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64x";
6241 else if (Info.ElementType->isFloat16Type() && !TI.hasFeature("zvfh") &&
6242 !TI.hasFeature("zvfhmin"))
6243 Diag(Loc, diag::err_riscv_type_requires_extension, D)
6244 << Ty << "zvfh or zvfhmin";
6245 else if (Info.ElementType->isBFloat16Type() &&
6246 !TI.hasFeature("experimental-zvfbfmin"))
6247 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zvfbfmin";
6248 else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Float) &&
6249 !TI.hasFeature("zve32f"))
6250 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32f";
6251 else if (Info.ElementType->isSpecificBuiltinType(BuiltinType::Double) &&
6252 !TI.hasFeature("zve64d"))
6253 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve64d";
  // The caller already checked isRVVType() before calling this function, so if
  // we don't have at least zve32x support we need to emit an error.
6256 else if (!TI.hasFeature("zve32x"))
6257 Diag(Loc, diag::err_riscv_type_requires_extension, D) << Ty << "zve32x";
6258}
6259
6260bool Sema::CheckNVPTXBuiltinFunctionCall(const TargetInfo &TI,
6261 unsigned BuiltinID,
6262 CallExpr *TheCall) {
6263 switch (BuiltinID) {
6264 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
6265 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
6266 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
6267 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
    return checkArgCountAtMost(*this, TheCall, 3);
6269 }
6270
6271 return false;
6272}
6273
6274// Check if the rounding mode is legal.
6275bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
6276 // Indicates if this instruction has rounding control or just SAE.
6277 bool HasRC = false;
6278
6279 unsigned ArgNum = 0;
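  // This switch only determines which argument carries the rounding/SAE
  // immediate (and whether rounding control is allowed); the value itself is
  // validated after the switch.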
6280 switch (BuiltinID) {
6281 default:
6282 return false;
6283 case X86::BI__builtin_ia32_vcvttsd2si32:
6284 case X86::BI__builtin_ia32_vcvttsd2si64:
6285 case X86::BI__builtin_ia32_vcvttsd2usi32:
6286 case X86::BI__builtin_ia32_vcvttsd2usi64:
6287 case X86::BI__builtin_ia32_vcvttss2si32:
6288 case X86::BI__builtin_ia32_vcvttss2si64:
6289 case X86::BI__builtin_ia32_vcvttss2usi32:
6290 case X86::BI__builtin_ia32_vcvttss2usi64:
6291 case X86::BI__builtin_ia32_vcvttsh2si32:
6292 case X86::BI__builtin_ia32_vcvttsh2si64:
6293 case X86::BI__builtin_ia32_vcvttsh2usi32:
6294 case X86::BI__builtin_ia32_vcvttsh2usi64:
6295 ArgNum = 1;
6296 break;
6297 case X86::BI__builtin_ia32_maxpd512:
6298 case X86::BI__builtin_ia32_maxps512:
6299 case X86::BI__builtin_ia32_minpd512:
6300 case X86::BI__builtin_ia32_minps512:
6301 case X86::BI__builtin_ia32_maxph512:
6302 case X86::BI__builtin_ia32_minph512:
6303 ArgNum = 2;
6304 break;
6305 case X86::BI__builtin_ia32_vcvtph2pd512_mask:
6306 case X86::BI__builtin_ia32_vcvtph2psx512_mask:
6307 case X86::BI__builtin_ia32_cvtps2pd512_mask:
6308 case X86::BI__builtin_ia32_cvttpd2dq512_mask:
6309 case X86::BI__builtin_ia32_cvttpd2qq512_mask:
6310 case X86::BI__builtin_ia32_cvttpd2udq512_mask:
6311 case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
6312 case X86::BI__builtin_ia32_cvttps2dq512_mask:
6313 case X86::BI__builtin_ia32_cvttps2qq512_mask:
6314 case X86::BI__builtin_ia32_cvttps2udq512_mask:
6315 case X86::BI__builtin_ia32_cvttps2uqq512_mask:
6316 case X86::BI__builtin_ia32_vcvttph2w512_mask:
6317 case X86::BI__builtin_ia32_vcvttph2uw512_mask:
6318 case X86::BI__builtin_ia32_vcvttph2dq512_mask:
6319 case X86::BI__builtin_ia32_vcvttph2udq512_mask:
6320 case X86::BI__builtin_ia32_vcvttph2qq512_mask:
6321 case X86::BI__builtin_ia32_vcvttph2uqq512_mask:
6322 case X86::BI__builtin_ia32_exp2pd_mask:
6323 case X86::BI__builtin_ia32_exp2ps_mask:
6324 case X86::BI__builtin_ia32_getexppd512_mask:
6325 case X86::BI__builtin_ia32_getexpps512_mask:
6326 case X86::BI__builtin_ia32_getexpph512_mask:
6327 case X86::BI__builtin_ia32_rcp28pd_mask:
6328 case X86::BI__builtin_ia32_rcp28ps_mask:
6329 case X86::BI__builtin_ia32_rsqrt28pd_mask:
6330 case X86::BI__builtin_ia32_rsqrt28ps_mask:
6331 case X86::BI__builtin_ia32_vcomisd:
6332 case X86::BI__builtin_ia32_vcomiss:
6333 case X86::BI__builtin_ia32_vcomish:
6334 case X86::BI__builtin_ia32_vcvtph2ps512_mask:
6335 ArgNum = 3;
6336 break;
6337 case X86::BI__builtin_ia32_cmppd512_mask:
6338 case X86::BI__builtin_ia32_cmpps512_mask:
6339 case X86::BI__builtin_ia32_cmpsd_mask:
6340 case X86::BI__builtin_ia32_cmpss_mask:
6341 case X86::BI__builtin_ia32_cmpsh_mask:
6342 case X86::BI__builtin_ia32_vcvtsh2sd_round_mask:
6343 case X86::BI__builtin_ia32_vcvtsh2ss_round_mask:
6344 case X86::BI__builtin_ia32_cvtss2sd_round_mask:
6345 case X86::BI__builtin_ia32_getexpsd128_round_mask:
6346 case X86::BI__builtin_ia32_getexpss128_round_mask:
6347 case X86::BI__builtin_ia32_getexpsh128_round_mask:
6348 case X86::BI__builtin_ia32_getmantpd512_mask:
6349 case X86::BI__builtin_ia32_getmantps512_mask:
6350 case X86::BI__builtin_ia32_getmantph512_mask:
6351 case X86::BI__builtin_ia32_maxsd_round_mask:
6352 case X86::BI__builtin_ia32_maxss_round_mask:
6353 case X86::BI__builtin_ia32_maxsh_round_mask:
6354 case X86::BI__builtin_ia32_minsd_round_mask:
6355 case X86::BI__builtin_ia32_minss_round_mask:
6356 case X86::BI__builtin_ia32_minsh_round_mask:
6357 case X86::BI__builtin_ia32_rcp28sd_round_mask:
6358 case X86::BI__builtin_ia32_rcp28ss_round_mask:
6359 case X86::BI__builtin_ia32_reducepd512_mask:
6360 case X86::BI__builtin_ia32_reduceps512_mask:
6361 case X86::BI__builtin_ia32_reduceph512_mask:
6362 case X86::BI__builtin_ia32_rndscalepd_mask:
6363 case X86::BI__builtin_ia32_rndscaleps_mask:
6364 case X86::BI__builtin_ia32_rndscaleph_mask:
6365 case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
6366 case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
6367 ArgNum = 4;
6368 break;
6369 case X86::BI__builtin_ia32_fixupimmpd512_mask:
6370 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
6371 case X86::BI__builtin_ia32_fixupimmps512_mask:
6372 case X86::BI__builtin_ia32_fixupimmps512_maskz:
6373 case X86::BI__builtin_ia32_fixupimmsd_mask:
6374 case X86::BI__builtin_ia32_fixupimmsd_maskz:
6375 case X86::BI__builtin_ia32_fixupimmss_mask:
6376 case X86::BI__builtin_ia32_fixupimmss_maskz:
6377 case X86::BI__builtin_ia32_getmantsd_round_mask:
6378 case X86::BI__builtin_ia32_getmantss_round_mask:
6379 case X86::BI__builtin_ia32_getmantsh_round_mask:
6380 case X86::BI__builtin_ia32_rangepd512_mask:
6381 case X86::BI__builtin_ia32_rangeps512_mask:
6382 case X86::BI__builtin_ia32_rangesd128_round_mask:
6383 case X86::BI__builtin_ia32_rangess128_round_mask:
6384 case X86::BI__builtin_ia32_reducesd_mask:
6385 case X86::BI__builtin_ia32_reducess_mask:
6386 case X86::BI__builtin_ia32_reducesh_mask:
6387 case X86::BI__builtin_ia32_rndscalesd_round_mask:
6388 case X86::BI__builtin_ia32_rndscaless_round_mask:
6389 case X86::BI__builtin_ia32_rndscalesh_round_mask:
6390 ArgNum = 5;
6391 break;
6392 case X86::BI__builtin_ia32_vcvtsd2si64:
6393 case X86::BI__builtin_ia32_vcvtsd2si32:
6394 case X86::BI__builtin_ia32_vcvtsd2usi32:
6395 case X86::BI__builtin_ia32_vcvtsd2usi64:
6396 case X86::BI__builtin_ia32_vcvtss2si32:
6397 case X86::BI__builtin_ia32_vcvtss2si64:
6398 case X86::BI__builtin_ia32_vcvtss2usi32:
6399 case X86::BI__builtin_ia32_vcvtss2usi64:
6400 case X86::BI__builtin_ia32_vcvtsh2si32:
6401 case X86::BI__builtin_ia32_vcvtsh2si64:
6402 case X86::BI__builtin_ia32_vcvtsh2usi32:
6403 case X86::BI__builtin_ia32_vcvtsh2usi64:
6404 case X86::BI__builtin_ia32_sqrtpd512:
6405 case X86::BI__builtin_ia32_sqrtps512:
6406 case X86::BI__builtin_ia32_sqrtph512:
6407 ArgNum = 1;
6408 HasRC = true;
6409 break;
6410 case X86::BI__builtin_ia32_addph512:
6411 case X86::BI__builtin_ia32_divph512:
6412 case X86::BI__builtin_ia32_mulph512:
6413 case X86::BI__builtin_ia32_subph512:
6414 case X86::BI__builtin_ia32_addpd512:
6415 case X86::BI__builtin_ia32_addps512:
6416 case X86::BI__builtin_ia32_divpd512:
6417 case X86::BI__builtin_ia32_divps512:
6418 case X86::BI__builtin_ia32_mulpd512:
6419 case X86::BI__builtin_ia32_mulps512:
6420 case X86::BI__builtin_ia32_subpd512:
6421 case X86::BI__builtin_ia32_subps512:
6422 case X86::BI__builtin_ia32_cvtsi2sd64:
6423 case X86::BI__builtin_ia32_cvtsi2ss32:
6424 case X86::BI__builtin_ia32_cvtsi2ss64:
6425 case X86::BI__builtin_ia32_cvtusi2sd64:
6426 case X86::BI__builtin_ia32_cvtusi2ss32:
6427 case X86::BI__builtin_ia32_cvtusi2ss64:
6428 case X86::BI__builtin_ia32_vcvtusi2sh:
6429 case X86::BI__builtin_ia32_vcvtusi642sh:
6430 case X86::BI__builtin_ia32_vcvtsi2sh:
6431 case X86::BI__builtin_ia32_vcvtsi642sh:
6432 ArgNum = 2;
6433 HasRC = true;
6434 break;
6435 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
6436 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
6437 case X86::BI__builtin_ia32_vcvtpd2ph512_mask:
6438 case X86::BI__builtin_ia32_vcvtps2phx512_mask:
6439 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
6440 case X86::BI__builtin_ia32_cvtpd2dq512_mask:
6441 case X86::BI__builtin_ia32_cvtpd2qq512_mask:
6442 case X86::BI__builtin_ia32_cvtpd2udq512_mask:
6443 case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
6444 case X86::BI__builtin_ia32_cvtps2dq512_mask:
6445 case X86::BI__builtin_ia32_cvtps2qq512_mask:
6446 case X86::BI__builtin_ia32_cvtps2udq512_mask:
6447 case X86::BI__builtin_ia32_cvtps2uqq512_mask:
6448 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
6449 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
6450 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
6451 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
6452 case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
6453 case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
6454 case X86::BI__builtin_ia32_vcvtw2ph512_mask:
6455 case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
6456 case X86::BI__builtin_ia32_vcvtph2w512_mask:
6457 case X86::BI__builtin_ia32_vcvtph2uw512_mask:
6458 case X86::BI__builtin_ia32_vcvtph2dq512_mask:
6459 case X86::BI__builtin_ia32_vcvtph2udq512_mask:
6460 case X86::BI__builtin_ia32_vcvtph2qq512_mask:
6461 case X86::BI__builtin_ia32_vcvtph2uqq512_mask:
6462 case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
6463 case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
6464 ArgNum = 3;
6465 HasRC = true;
6466 break;
6467 case X86::BI__builtin_ia32_addsh_round_mask:
6468 case X86::BI__builtin_ia32_addss_round_mask:
6469 case X86::BI__builtin_ia32_addsd_round_mask:
6470 case X86::BI__builtin_ia32_divsh_round_mask:
6471 case X86::BI__builtin_ia32_divss_round_mask:
6472 case X86::BI__builtin_ia32_divsd_round_mask:
6473 case X86::BI__builtin_ia32_mulsh_round_mask:
6474 case X86::BI__builtin_ia32_mulss_round_mask:
6475 case X86::BI__builtin_ia32_mulsd_round_mask:
6476 case X86::BI__builtin_ia32_subsh_round_mask:
6477 case X86::BI__builtin_ia32_subss_round_mask:
6478 case X86::BI__builtin_ia32_subsd_round_mask:
6479 case X86::BI__builtin_ia32_scalefph512_mask:
6480 case X86::BI__builtin_ia32_scalefpd512_mask:
6481 case X86::BI__builtin_ia32_scalefps512_mask:
6482 case X86::BI__builtin_ia32_scalefsd_round_mask:
6483 case X86::BI__builtin_ia32_scalefss_round_mask:
6484 case X86::BI__builtin_ia32_scalefsh_round_mask:
6485 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
6486 case X86::BI__builtin_ia32_vcvtss2sh_round_mask:
6487 case X86::BI__builtin_ia32_vcvtsd2sh_round_mask:
6488 case X86::BI__builtin_ia32_sqrtsd_round_mask:
6489 case X86::BI__builtin_ia32_sqrtss_round_mask:
6490 case X86::BI__builtin_ia32_sqrtsh_round_mask:
6491 case X86::BI__builtin_ia32_vfmaddsd3_mask:
6492 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
6493 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
6494 case X86::BI__builtin_ia32_vfmaddss3_mask:
6495 case X86::BI__builtin_ia32_vfmaddss3_maskz:
6496 case X86::BI__builtin_ia32_vfmaddss3_mask3:
6497 case X86::BI__builtin_ia32_vfmaddsh3_mask:
6498 case X86::BI__builtin_ia32_vfmaddsh3_maskz:
6499 case X86::BI__builtin_ia32_vfmaddsh3_mask3:
6500 case X86::BI__builtin_ia32_vfmaddpd512_mask:
6501 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
6502 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
6503 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
6504 case X86::BI__builtin_ia32_vfmaddps512_mask:
6505 case X86::BI__builtin_ia32_vfmaddps512_maskz:
6506 case X86::BI__builtin_ia32_vfmaddps512_mask3:
6507 case X86::BI__builtin_ia32_vfmsubps512_mask3:
6508 case X86::BI__builtin_ia32_vfmaddph512_mask:
6509 case X86::BI__builtin_ia32_vfmaddph512_maskz:
6510 case X86::BI__builtin_ia32_vfmaddph512_mask3:
6511 case X86::BI__builtin_ia32_vfmsubph512_mask3:
6512 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
6513 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
6514 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
6515 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
6516 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
6517 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
6518 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
6519 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
6520 case X86::BI__builtin_ia32_vfmaddsubph512_mask:
6521 case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
6522 case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
6523 case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
6524 case X86::BI__builtin_ia32_vfmaddcsh_mask:
6525 case X86::BI__builtin_ia32_vfmaddcsh_round_mask:
6526 case X86::BI__builtin_ia32_vfmaddcsh_round_mask3:
6527 case X86::BI__builtin_ia32_vfmaddcph512_mask:
6528 case X86::BI__builtin_ia32_vfmaddcph512_maskz:
6529 case X86::BI__builtin_ia32_vfmaddcph512_mask3:
6530 case X86::BI__builtin_ia32_vfcmaddcsh_mask:
6531 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
6532 case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
6533 case X86::BI__builtin_ia32_vfcmaddcph512_mask:
6534 case X86::BI__builtin_ia32_vfcmaddcph512_maskz:
6535 case X86::BI__builtin_ia32_vfcmaddcph512_mask3:
6536 case X86::BI__builtin_ia32_vfmulcsh_mask:
6537 case X86::BI__builtin_ia32_vfmulcph512_mask:
6538 case X86::BI__builtin_ia32_vfcmulcsh_mask:
6539 case X86::BI__builtin_ia32_vfcmulcph512_mask:
6540 ArgNum = 4;
6541 HasRC = true;
6542 break;
6543 }
6544
6545 llvm::APSInt Result;
6546
6547 // We can't check the value of a dependent argument.
6548 Expr *Arg = TheCall->getArg(ArgNum);
6549 if (Arg->isTypeDependent() || Arg->isValueDependent())
6550 return false;
6551
6552 // Check constant-ness first.
6553 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6554 return true;
6555
6556 // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
6557 // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
6558 // make sure it is only combined with ROUND_NO_EXC. If the intrinsic does not
6559 // have rounding control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together.
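// For example, an immediate of (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC),
// i.e. 8, selects an explicit rounding mode with exceptions suppressed, while
// _MM_FROUND_CUR_DIRECTION (4) uses the current MXCSR rounding mode.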
6560 if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
6561 Result == 8/*ROUND_NO_EXC*/ ||
6562 (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) ||
6563 (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
6564 return false;
6565
6566 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_rounding)
6567 << Arg->getSourceRange();
6568}
6569
6570// Check if the gather/scatter scale is legal.
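// For example, the scale immediate of a gather such as
// _mm512_i32gather_ps(index, base, scale) must be 1, 2, 4 or 8.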
6571bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
6572 CallExpr *TheCall) {
6573 unsigned ArgNum = 0;
6574 switch (BuiltinID) {
6575 default:
6576 return false;
6577 case X86::BI__builtin_ia32_gatherpfdpd:
6578 case X86::BI__builtin_ia32_gatherpfdps:
6579 case X86::BI__builtin_ia32_gatherpfqpd:
6580 case X86::BI__builtin_ia32_gatherpfqps:
6581 case X86::BI__builtin_ia32_scatterpfdpd:
6582 case X86::BI__builtin_ia32_scatterpfdps:
6583 case X86::BI__builtin_ia32_scatterpfqpd:
6584 case X86::BI__builtin_ia32_scatterpfqps:
6585 ArgNum = 3;
6586 break;
6587 case X86::BI__builtin_ia32_gatherd_pd:
6588 case X86::BI__builtin_ia32_gatherd_pd256:
6589 case X86::BI__builtin_ia32_gatherq_pd:
6590 case X86::BI__builtin_ia32_gatherq_pd256:
6591 case X86::BI__builtin_ia32_gatherd_ps:
6592 case X86::BI__builtin_ia32_gatherd_ps256:
6593 case X86::BI__builtin_ia32_gatherq_ps:
6594 case X86::BI__builtin_ia32_gatherq_ps256:
6595 case X86::BI__builtin_ia32_gatherd_q:
6596 case X86::BI__builtin_ia32_gatherd_q256:
6597 case X86::BI__builtin_ia32_gatherq_q:
6598 case X86::BI__builtin_ia32_gatherq_q256:
6599 case X86::BI__builtin_ia32_gatherd_d:
6600 case X86::BI__builtin_ia32_gatherd_d256:
6601 case X86::BI__builtin_ia32_gatherq_d:
6602 case X86::BI__builtin_ia32_gatherq_d256:
6603 case X86::BI__builtin_ia32_gather3div2df:
6604 case X86::BI__builtin_ia32_gather3div2di:
6605 case X86::BI__builtin_ia32_gather3div4df:
6606 case X86::BI__builtin_ia32_gather3div4di:
6607 case X86::BI__builtin_ia32_gather3div4sf:
6608 case X86::BI__builtin_ia32_gather3div4si:
6609 case X86::BI__builtin_ia32_gather3div8sf:
6610 case X86::BI__builtin_ia32_gather3div8si:
6611 case X86::BI__builtin_ia32_gather3siv2df:
6612 case X86::BI__builtin_ia32_gather3siv2di:
6613 case X86::BI__builtin_ia32_gather3siv4df:
6614 case X86::BI__builtin_ia32_gather3siv4di:
6615 case X86::BI__builtin_ia32_gather3siv4sf:
6616 case X86::BI__builtin_ia32_gather3siv4si:
6617 case X86::BI__builtin_ia32_gather3siv8sf:
6618 case X86::BI__builtin_ia32_gather3siv8si:
6619 case X86::BI__builtin_ia32_gathersiv8df:
6620 case X86::BI__builtin_ia32_gathersiv16sf:
6621 case X86::BI__builtin_ia32_gatherdiv8df:
6622 case X86::BI__builtin_ia32_gatherdiv16sf:
6623 case X86::BI__builtin_ia32_gathersiv8di:
6624 case X86::BI__builtin_ia32_gathersiv16si:
6625 case X86::BI__builtin_ia32_gatherdiv8di:
6626 case X86::BI__builtin_ia32_gatherdiv16si:
6627 case X86::BI__builtin_ia32_scatterdiv2df:
6628 case X86::BI__builtin_ia32_scatterdiv2di:
6629 case X86::BI__builtin_ia32_scatterdiv4df:
6630 case X86::BI__builtin_ia32_scatterdiv4di:
6631 case X86::BI__builtin_ia32_scatterdiv4sf:
6632 case X86::BI__builtin_ia32_scatterdiv4si:
6633 case X86::BI__builtin_ia32_scatterdiv8sf:
6634 case X86::BI__builtin_ia32_scatterdiv8si:
6635 case X86::BI__builtin_ia32_scattersiv2df:
6636 case X86::BI__builtin_ia32_scattersiv2di:
6637 case X86::BI__builtin_ia32_scattersiv4df:
6638 case X86::BI__builtin_ia32_scattersiv4di:
6639 case X86::BI__builtin_ia32_scattersiv4sf:
6640 case X86::BI__builtin_ia32_scattersiv4si:
6641 case X86::BI__builtin_ia32_scattersiv8sf:
6642 case X86::BI__builtin_ia32_scattersiv8si:
6643 case X86::BI__builtin_ia32_scattersiv8df:
6644 case X86::BI__builtin_ia32_scattersiv16sf:
6645 case X86::BI__builtin_ia32_scatterdiv8df:
6646 case X86::BI__builtin_ia32_scatterdiv16sf:
6647 case X86::BI__builtin_ia32_scattersiv8di:
6648 case X86::BI__builtin_ia32_scattersiv16si:
6649 case X86::BI__builtin_ia32_scatterdiv8di:
6650 case X86::BI__builtin_ia32_scatterdiv16si:
6651 ArgNum = 4;
6652 break;
6653 }
6654
6655 llvm::APSInt Result;
6656
6657 // We can't check the value of a dependent argument.
6658 Expr *Arg = TheCall->getArg(ArgNum);
6659 if (Arg->isTypeDependent() || Arg->isValueDependent())
6660 return false;
6661
6662 // Check constant-ness first.
6663 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6664 return true;
6665
6666 if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
6667 return false;
6668
6669 return Diag(TheCall->getBeginLoc(), diag::err_x86_builtin_invalid_scale)
6670 << Arg->getSourceRange();
6671}
6672
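// AMX tile register numbers: a valid tile immediate names one of tmm0..tmm7.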
6673enum { TileRegLow = 0, TileRegHigh = 7 };
6674
6675bool Sema::CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
6676 ArrayRef<int> ArgNums) {
6677 for (int ArgNum : ArgNums) {
6678 if (SemaBuiltinConstantArgRange(TheCall, ArgNum, TileRegLow, TileRegHigh))
6679 return true;
6680 }
6681 return false;
6682}
6683
6684bool Sema::CheckX86BuiltinTileDuplicate(CallExpr *TheCall,
6685 ArrayRef<int> ArgNums) {
6686 // Because the maximum number of tile registers is TileRegHigh + 1, use one
6687 // bit per register in the bitset to track which registers have been used.
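// For example, _tile_dpbssd(0, 1, 1) names tile 1 twice and is diagnosed with
// err_x86_builtin_tile_arg_duplicate.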
6688 std::bitset<TileRegHigh + 1> ArgValues;
6689 for (int ArgNum : ArgNums) {
6690 Expr *Arg = TheCall->getArg(ArgNum);
6691 if (Arg->isTypeDependent() || Arg->isValueDependent())
6692 continue;
6693
6694 llvm::APSInt Result;
6695 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
6696 return true;
6697 int ArgExtValue = Result.getExtValue();
6698 assert((ArgExtValue >= TileRegLow && ArgExtValue <= TileRegHigh) &&
6699 "Incorrect tile register num.");
6700 if (ArgValues.test(ArgExtValue))
6701 return Diag(TheCall->getBeginLoc(),
6702 diag::err_x86_builtin_tile_arg_duplicate)
6703 << TheCall->getArg(ArgNum)->getSourceRange();
6704 ArgValues.set(ArgExtValue);
6705 }
6706 return false;
6707}
6708
6709bool Sema::CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
6710 ArrayRef<int> ArgNums) {
6711 return CheckX86BuiltinTileArgumentsRange(TheCall, ArgNums) ||
6712 CheckX86BuiltinTileDuplicate(TheCall, ArgNums);
6713}
6714
6715bool Sema::CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall) {
6716 switch (BuiltinID) {
6717 default:
6718 return false;
6719 case X86::BI__builtin_ia32_tileloadd64:
6720 case X86::BI__builtin_ia32_tileloaddt164:
6721 case X86::BI__builtin_ia32_tilestored64:
6722 case X86::BI__builtin_ia32_tilezero:
6723 return CheckX86BuiltinTileArgumentsRange(TheCall, 0);
6724 case X86::BI__builtin_ia32_tdpbssd:
6725 case X86::BI__builtin_ia32_tdpbsud:
6726 case X86::BI__builtin_ia32_tdpbusd:
6727 case X86::BI__builtin_ia32_tdpbuud:
6728 case X86::BI__builtin_ia32_tdpbf16ps:
6729 case X86::BI__builtin_ia32_tdpfp16ps:
6730 case X86::BI__builtin_ia32_tcmmimfp16ps:
6731 case X86::BI__builtin_ia32_tcmmrlfp16ps:
6732 return CheckX86BuiltinTileRangeAndDuplicate(TheCall, {0, 1, 2});
6733 }
6734}
6735static bool isX86_32Builtin(unsigned BuiltinID) {
6736 // These builtins only work on x86-32 targets.
6737 switch (BuiltinID) {
6738 case X86::BI__builtin_ia32_readeflags_u32:
6739 case X86::BI__builtin_ia32_writeeflags_u32:
6740 return true;
6741 }
6742
6743 return false;
6744}
6745
6746bool Sema::CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
6747 CallExpr *TheCall) {
6748 // Check for 32-bit only builtins on a 64-bit target.
6749 const llvm::Triple &TT = TI.getTriple();
6750 if (TT.getArch() != llvm::Triple::x86 && isX86_32Builtin(BuiltinID))
6751 return Diag(TheCall->getCallee()->getBeginLoc(),
6752 diag::err_32_bit_builtin_64_bit_tgt);
6753
6754 // If the intrinsic has rounding or SAE, make sure it's valid.
6755 if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
6756 return true;
6757
6758 // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
6759 if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
6760 return true;
6761
6762 // If the intrinsic has tile arguments, make sure they are valid.
6763 if (CheckX86BuiltinTileArguments(BuiltinID, TheCall))
6764 return true;
6765
6766 // For intrinsics which take an immediate value as part of the instruction,
6767 // range check them here.
6768 int i = 0, l = 0, u = 0;
6769 switch (BuiltinID) {
6770 default:
6771 return false;
6772 case X86::BI__builtin_ia32_vec_ext_v2si:
6773 case X86::BI__builtin_ia32_vec_ext_v2di:
6774 case X86::BI__builtin_ia32_vextractf128_pd256:
6775 case X86::BI__builtin_ia32_vextractf128_ps256:
6776 case X86::BI__builtin_ia32_vextractf128_si256:
6777 case X86::BI__builtin_ia32_extract128i256:
6778 case X86::BI__builtin_ia32_extractf64x4_mask:
6779 case X86::BI__builtin_ia32_extracti64x4_mask:
6780 case X86::BI__builtin_ia32_extractf32x8_mask:
6781 case X86::BI__builtin_ia32_extracti32x8_mask:
6782 case X86::BI__builtin_ia32_extractf64x2_256_mask:
6783 case X86::BI__builtin_ia32_extracti64x2_256_mask:
6784 case X86::BI__builtin_ia32_extractf32x4_256_mask:
6785 case X86::BI__builtin_ia32_extracti32x4_256_mask:
6786 i = 1; l = 0; u = 1;
6787 break;
6788 case X86::BI__builtin_ia32_vec_set_v2di:
6789 case X86::BI__builtin_ia32_vinsertf128_pd256:
6790 case X86::BI__builtin_ia32_vinsertf128_ps256:
6791 case X86::BI__builtin_ia32_vinsertf128_si256:
6792 case X86::BI__builtin_ia32_insert128i256:
6793 case X86::BI__builtin_ia32_insertf32x8:
6794 case X86::BI__builtin_ia32_inserti32x8:
6795 case X86::BI__builtin_ia32_insertf64x4:
6796 case X86::BI__builtin_ia32_inserti64x4:
6797 case X86::BI__builtin_ia32_insertf64x2_256:
6798 case X86::BI__builtin_ia32_inserti64x2_256:
6799 case X86::BI__builtin_ia32_insertf32x4_256:
6800 case X86::BI__builtin_ia32_inserti32x4_256:
6801 i = 2; l = 0; u = 1;
6802 break;
6803 case X86::BI__builtin_ia32_vpermilpd:
6804 case X86::BI__builtin_ia32_vec_ext_v4hi:
6805 case X86::BI__builtin_ia32_vec_ext_v4si:
6806 case X86::BI__builtin_ia32_vec_ext_v4sf:
6807 case X86::BI__builtin_ia32_vec_ext_v4di:
6808 case X86::BI__builtin_ia32_extractf32x4_mask:
6809 case X86::BI__builtin_ia32_extracti32x4_mask:
6810 case X86::BI__builtin_ia32_extractf64x2_512_mask:
6811 case X86::BI__builtin_ia32_extracti64x2_512_mask:
6812 i = 1; l = 0; u = 3;
6813 break;
6814 case X86::BI_mm_prefetch:
6815 case X86::BI__builtin_ia32_vec_ext_v8hi:
6816 case X86::BI__builtin_ia32_vec_ext_v8si:
6817 i = 1; l = 0; u = 7;
6818 break;
6819 case X86::BI__builtin_ia32_sha1rnds4:
6820 case X86::BI__builtin_ia32_blendpd:
6821 case X86::BI__builtin_ia32_shufpd:
6822 case X86::BI__builtin_ia32_vec_set_v4hi:
6823 case X86::BI__builtin_ia32_vec_set_v4si:
6824 case X86::BI__builtin_ia32_vec_set_v4di:
6825 case X86::BI__builtin_ia32_shuf_f32x4_256:
6826 case X86::BI__builtin_ia32_shuf_f64x2_256:
6827 case X86::BI__builtin_ia32_shuf_i32x4_256:
6828 case X86::BI__builtin_ia32_shuf_i64x2_256:
6829 case X86::BI__builtin_ia32_insertf64x2_512:
6830 case X86::BI__builtin_ia32_inserti64x2_512:
6831 case X86::BI__builtin_ia32_insertf32x4:
6832 case X86::BI__builtin_ia32_inserti32x4:
6833 i = 2; l = 0; u = 3;
6834 break;
6835 case X86::BI__builtin_ia32_vpermil2pd:
6836 case X86::BI__builtin_ia32_vpermil2pd256:
6837 case X86::BI__builtin_ia32_vpermil2ps:
6838 case X86::BI__builtin_ia32_vpermil2ps256:
6839 i = 3; l = 0; u = 3;
6840 break;
6841 case X86::BI__builtin_ia32_cmpb128_mask:
6842 case X86::BI__builtin_ia32_cmpw128_mask:
6843 case X86::BI__builtin_ia32_cmpd128_mask:
6844 case X86::BI__builtin_ia32_cmpq128_mask:
6845 case X86::BI__builtin_ia32_cmpb256_mask:
6846 case X86::BI__builtin_ia32_cmpw256_mask:
6847 case X86::BI__builtin_ia32_cmpd256_mask:
6848 case X86::BI__builtin_ia32_cmpq256_mask:
6849 case X86::BI__builtin_ia32_cmpb512_mask:
6850 case X86::BI__builtin_ia32_cmpw512_mask:
6851 case X86::BI__builtin_ia32_cmpd512_mask:
6852 case X86::BI__builtin_ia32_cmpq512_mask:
6853 case X86::BI__builtin_ia32_ucmpb128_mask:
6854 case X86::BI__builtin_ia32_ucmpw128_mask:
6855 case X86::BI__builtin_ia32_ucmpd128_mask:
6856 case X86::BI__builtin_ia32_ucmpq128_mask:
6857 case X86::BI__builtin_ia32_ucmpb256_mask:
6858 case X86::BI__builtin_ia32_ucmpw256_mask:
6859 case X86::BI__builtin_ia32_ucmpd256_mask:
6860 case X86::BI__builtin_ia32_ucmpq256_mask:
6861 case X86::BI__builtin_ia32_ucmpb512_mask:
6862 case X86::BI__builtin_ia32_ucmpw512_mask:
6863 case X86::BI__builtin_ia32_ucmpd512_mask:
6864 case X86::BI__builtin_ia32_ucmpq512_mask:
6865 case X86::BI__builtin_ia32_vpcomub:
6866 case X86::BI__builtin_ia32_vpcomuw:
6867 case X86::BI__builtin_ia32_vpcomud:
6868 case X86::BI__builtin_ia32_vpcomuq:
6869 case X86::BI__builtin_ia32_vpcomb:
6870 case X86::BI__builtin_ia32_vpcomw:
6871 case X86::BI__builtin_ia32_vpcomd:
6872 case X86::BI__builtin_ia32_vpcomq:
6873 case X86::BI__builtin_ia32_vec_set_v8hi:
6874 case X86::BI__builtin_ia32_vec_set_v8si:
6875 i = 2; l = 0; u = 7;
6876 break;
6877 case X86::BI__builtin_ia32_vpermilpd256:
6878 case X86::BI__builtin_ia32_roundps:
6879 case X86::BI__builtin_ia32_roundpd:
6880 case X86::BI__builtin_ia32_roundps256:
6881 case X86::BI__builtin_ia32_roundpd256:
6882 case X86::BI__builtin_ia32_getmantpd128_mask:
6883 case X86::BI__builtin_ia32_getmantpd256_mask:
6884 case X86::BI__builtin_ia32_getmantps128_mask:
6885 case X86::BI__builtin_ia32_getmantps256_mask:
6886 case X86::BI__builtin_ia32_getmantpd512_mask:
6887 case X86::BI__builtin_ia32_getmantps512_mask:
6888 case X86::BI__builtin_ia32_getmantph128_mask:
6889 case X86::BI__builtin_ia32_getmantph256_mask:
6890 case X86::BI__builtin_ia32_getmantph512_mask:
6891 case X86::BI__builtin_ia32_vec_ext_v16qi:
6892 case X86::BI__builtin_ia32_vec_ext_v16hi:
6893 i = 1; l = 0; u = 15;
6894 break;
6895 case X86::BI__builtin_ia32_pblendd128:
6896 case X86::BI__builtin_ia32_blendps:
6897 case X86::BI__builtin_ia32_blendpd256:
6898 case X86::BI__builtin_ia32_shufpd256:
6899 case X86::BI__builtin_ia32_roundss:
6900 case X86::BI__builtin_ia32_roundsd:
6901 case X86::BI__builtin_ia32_rangepd128_mask:
6902 case X86::BI__builtin_ia32_rangepd256_mask:
6903 case X86::BI__builtin_ia32_rangepd512_mask:
6904 case X86::BI__builtin_ia32_rangeps128_mask:
6905 case X86::BI__builtin_ia32_rangeps256_mask:
6906 case X86::BI__builtin_ia32_rangeps512_mask:
6907 case X86::BI__builtin_ia32_getmantsd_round_mask:
6908 case X86::BI__builtin_ia32_getmantss_round_mask:
6909 case X86::BI__builtin_ia32_getmantsh_round_mask:
6910 case X86::BI__builtin_ia32_vec_set_v16qi:
6911 case X86::BI__builtin_ia32_vec_set_v16hi:
6912 i = 2; l = 0; u = 15;
6913 break;
6914 case X86::BI__builtin_ia32_vec_ext_v32qi:
6915 i = 1; l = 0; u = 31;
6916 break;
6917 case X86::BI__builtin_ia32_cmpps:
6918 case X86::BI__builtin_ia32_cmpss:
6919 case X86::BI__builtin_ia32_cmppd:
6920 case X86::BI__builtin_ia32_cmpsd:
6921 case X86::BI__builtin_ia32_cmpps256:
6922 case X86::BI__builtin_ia32_cmppd256:
6923 case X86::BI__builtin_ia32_cmpps128_mask:
6924 case X86::BI__builtin_ia32_cmppd128_mask:
6925 case X86::BI__builtin_ia32_cmpps256_mask:
6926 case X86::BI__builtin_ia32_cmppd256_mask:
6927 case X86::BI__builtin_ia32_cmpps512_mask:
6928 case X86::BI__builtin_ia32_cmppd512_mask:
6929 case X86::BI__builtin_ia32_cmpsd_mask:
6930 case X86::BI__builtin_ia32_cmpss_mask:
6931 case X86::BI__builtin_ia32_vec_set_v32qi:
6932 i = 2; l = 0; u = 31;
6933 break;
6934 case X86::BI__builtin_ia32_permdf256:
6935 case X86::BI__builtin_ia32_permdi256:
6936 case X86::BI__builtin_ia32_permdf512:
6937 case X86::BI__builtin_ia32_permdi512:
6938 case X86::BI__builtin_ia32_vpermilps:
6939 case X86::BI__builtin_ia32_vpermilps256:
6940 case X86::BI__builtin_ia32_vpermilpd512:
6941 case X86::BI__builtin_ia32_vpermilps512:
6942 case X86::BI__builtin_ia32_pshufd:
6943 case X86::BI__builtin_ia32_pshufd256:
6944 case X86::BI__builtin_ia32_pshufd512:
6945 case X86::BI__builtin_ia32_pshufhw:
6946 case X86::BI__builtin_ia32_pshufhw256:
6947 case X86::BI__builtin_ia32_pshufhw512:
6948 case X86::BI__builtin_ia32_pshuflw:
6949 case X86::BI__builtin_ia32_pshuflw256:
6950 case X86::BI__builtin_ia32_pshuflw512:
6951 case X86::BI__builtin_ia32_vcvtps2ph:
6952 case X86::BI__builtin_ia32_vcvtps2ph_mask:
6953 case X86::BI__builtin_ia32_vcvtps2ph256:
6954 case X86::BI__builtin_ia32_vcvtps2ph256_mask:
6955 case X86::BI__builtin_ia32_vcvtps2ph512_mask:
6956 case X86::BI__builtin_ia32_rndscaleps_128_mask:
6957 case X86::BI__builtin_ia32_rndscalepd_128_mask:
6958 case X86::BI__builtin_ia32_rndscaleps_256_mask:
6959 case X86::BI__builtin_ia32_rndscalepd_256_mask:
6960 case X86::BI__builtin_ia32_rndscaleps_mask:
6961 case X86::BI__builtin_ia32_rndscalepd_mask:
6962 case X86::BI__builtin_ia32_rndscaleph_mask:
6963 case X86::BI__builtin_ia32_reducepd128_mask:
6964 case X86::BI__builtin_ia32_reducepd256_mask:
6965 case X86::BI__builtin_ia32_reducepd512_mask:
6966 case X86::BI__builtin_ia32_reduceps128_mask:
6967 case X86::BI__builtin_ia32_reduceps256_mask:
6968 case X86::BI__builtin_ia32_reduceps512_mask:
6969 case X86::BI__builtin_ia32_reduceph128_mask:
6970 case X86::BI__builtin_ia32_reduceph256_mask:
6971 case X86::BI__builtin_ia32_reduceph512_mask:
6972 case X86::BI__builtin_ia32_prold512:
6973 case X86::BI__builtin_ia32_prolq512:
6974 case X86::BI__builtin_ia32_prold128:
6975 case X86::BI__builtin_ia32_prold256:
6976 case X86::BI__builtin_ia32_prolq128:
6977 case X86::BI__builtin_ia32_prolq256:
6978 case X86::BI__builtin_ia32_prord512:
6979 case X86::BI__builtin_ia32_prorq512:
6980 case X86::BI__builtin_ia32_prord128:
6981 case X86::BI__builtin_ia32_prord256:
6982 case X86::BI__builtin_ia32_prorq128:
6983 case X86::BI__builtin_ia32_prorq256:
6984 case X86::BI__builtin_ia32_fpclasspd128_mask:
6985 case X86::BI__builtin_ia32_fpclasspd256_mask:
6986 case X86::BI__builtin_ia32_fpclassps128_mask:
6987 case X86::BI__builtin_ia32_fpclassps256_mask:
6988 case X86::BI__builtin_ia32_fpclassps512_mask:
6989 case X86::BI__builtin_ia32_fpclasspd512_mask:
6990 case X86::BI__builtin_ia32_fpclassph128_mask:
6991 case X86::BI__builtin_ia32_fpclassph256_mask:
6992 case X86::BI__builtin_ia32_fpclassph512_mask:
6993 case X86::BI__builtin_ia32_fpclasssd_mask:
6994 case X86::BI__builtin_ia32_fpclassss_mask:
6995 case X86::BI__builtin_ia32_fpclasssh_mask:
6996 case X86::BI__builtin_ia32_pslldqi128_byteshift:
6997 case X86::BI__builtin_ia32_pslldqi256_byteshift:
6998 case X86::BI__builtin_ia32_pslldqi512_byteshift:
6999 case X86::BI__builtin_ia32_psrldqi128_byteshift:
7000 case X86::BI__builtin_ia32_psrldqi256_byteshift:
7001 case X86::BI__builtin_ia32_psrldqi512_byteshift:
7002 case X86::BI__builtin_ia32_kshiftliqi:
7003 case X86::BI__builtin_ia32_kshiftlihi:
7004 case X86::BI__builtin_ia32_kshiftlisi:
7005 case X86::BI__builtin_ia32_kshiftlidi:
7006 case X86::BI__builtin_ia32_kshiftriqi:
7007 case X86::BI__builtin_ia32_kshiftrihi:
7008 case X86::BI__builtin_ia32_kshiftrisi:
7009 case X86::BI__builtin_ia32_kshiftridi:
7010 i = 1; l = 0; u = 255;
7011 break;
7012 case X86::BI__builtin_ia32_vperm2f128_pd256:
7013 case X86::BI__builtin_ia32_vperm2f128_ps256:
7014 case X86::BI__builtin_ia32_vperm2f128_si256:
7015 case X86::BI__builtin_ia32_permti256:
7016 case X86::BI__builtin_ia32_pblendw128:
7017 case X86::BI__builtin_ia32_pblendw256:
7018 case X86::BI__builtin_ia32_blendps256:
7019 case X86::BI__builtin_ia32_pblendd256:
7020 case X86::BI__builtin_ia32_palignr128:
7021 case X86::BI__builtin_ia32_palignr256:
7022 case X86::BI__builtin_ia32_palignr512:
7023 case X86::BI__builtin_ia32_alignq512:
7024 case X86::BI__builtin_ia32_alignd512:
7025 case X86::BI__builtin_ia32_alignd128:
7026 case X86::BI__builtin_ia32_alignd256:
7027 case X86::BI__builtin_ia32_alignq128:
7028 case X86::BI__builtin_ia32_alignq256:
7029 case X86::BI__builtin_ia32_vcomisd:
7030 case X86::BI__builtin_ia32_vcomiss:
7031 case X86::BI__builtin_ia32_shuf_f32x4:
7032 case X86::BI__builtin_ia32_shuf_f64x2:
7033 case X86::BI__builtin_ia32_shuf_i32x4:
7034 case X86::BI__builtin_ia32_shuf_i64x2:
7035 case X86::BI__builtin_ia32_shufpd512:
7036 case X86::BI__builtin_ia32_shufps:
7037 case X86::BI__builtin_ia32_shufps256:
7038 case X86::BI__builtin_ia32_shufps512:
7039 case X86::BI__builtin_ia32_dbpsadbw128:
7040 case X86::BI__builtin_ia32_dbpsadbw256:
7041 case X86::BI__builtin_ia32_dbpsadbw512:
7042 case X86::BI__builtin_ia32_vpshldd128:
7043 case X86::BI__builtin_ia32_vpshldd256:
7044 case X86::BI__builtin_ia32_vpshldd512:
7045 case X86::BI__builtin_ia32_vpshldq128:
7046 case X86::BI__builtin_ia32_vpshldq256:
7047 case X86::BI__builtin_ia32_vpshldq512:
7048 case X86::BI__builtin_ia32_vpshldw128:
7049 case X86::BI__builtin_ia32_vpshldw256:
7050 case X86::BI__builtin_ia32_vpshldw512:
7051 case X86::BI__builtin_ia32_vpshrdd128:
7052 case X86::BI__builtin_ia32_vpshrdd256:
7053 case X86::BI__builtin_ia32_vpshrdd512:
7054 case X86::BI__builtin_ia32_vpshrdq128:
7055 case X86::BI__builtin_ia32_vpshrdq256:
7056 case X86::BI__builtin_ia32_vpshrdq512:
7057 case X86::BI__builtin_ia32_vpshrdw128:
7058 case X86::BI__builtin_ia32_vpshrdw256:
7059 case X86::BI__builtin_ia32_vpshrdw512:
7060 i = 2; l = 0; u = 255;
7061 break;
7062 case X86::BI__builtin_ia32_fixupimmpd512_mask:
7063 case X86::BI__builtin_ia32_fixupimmpd512_maskz:
7064 case X86::BI__builtin_ia32_fixupimmps512_mask:
7065 case X86::BI__builtin_ia32_fixupimmps512_maskz:
7066 case X86::BI__builtin_ia32_fixupimmsd_mask:
7067 case X86::BI__builtin_ia32_fixupimmsd_maskz:
7068 case X86::BI__builtin_ia32_fixupimmss_mask:
7069 case X86::BI__builtin_ia32_fixupimmss_maskz:
7070 case X86::BI__builtin_ia32_fixupimmpd128_mask:
7071 case X86::BI__builtin_ia32_fixupimmpd128_maskz:
7072 case X86::BI__builtin_ia32_fixupimmpd256_mask:
7073 case X86::BI__builtin_ia32_fixupimmpd256_maskz:
7074 case X86::BI__builtin_ia32_fixupimmps128_mask:
7075 case X86::BI__builtin_ia32_fixupimmps128_maskz:
7076 case X86::BI__builtin_ia32_fixupimmps256_mask:
7077 case X86::BI__builtin_ia32_fixupimmps256_maskz:
7078 case X86::BI__builtin_ia32_pternlogd512_mask:
7079 case X86::BI__builtin_ia32_pternlogd512_maskz:
7080 case X86::BI__builtin_ia32_pternlogq512_mask:
7081 case X86::BI__builtin_ia32_pternlogq512_maskz:
7082 case X86::BI__builtin_ia32_pternlogd128_mask:
7083 case X86::BI__builtin_ia32_pternlogd128_maskz:
7084 case X86::BI__builtin_ia32_pternlogd256_mask:
7085 case X86::BI__builtin_ia32_pternlogd256_maskz:
7086 case X86::BI__builtin_ia32_pternlogq128_mask:
7087 case X86::BI__builtin_ia32_pternlogq128_maskz:
7088 case X86::BI__builtin_ia32_pternlogq256_mask:
7089 case X86::BI__builtin_ia32_pternlogq256_maskz:
7090 case X86::BI__builtin_ia32_vsm3rnds2:
7091 i = 3; l = 0; u = 255;
7092 break;
7093 case X86::BI__builtin_ia32_gatherpfdpd:
7094 case X86::BI__builtin_ia32_gatherpfdps:
7095 case X86::BI__builtin_ia32_gatherpfqpd:
7096 case X86::BI__builtin_ia32_gatherpfqps:
7097 case X86::BI__builtin_ia32_scatterpfdpd:
7098 case X86::BI__builtin_ia32_scatterpfdps:
7099 case X86::BI__builtin_ia32_scatterpfqpd:
7100 case X86::BI__builtin_ia32_scatterpfqps:
7101 i = 4; l = 2; u = 3;
7102 break;
7103 case X86::BI__builtin_ia32_reducesd_mask:
7104 case X86::BI__builtin_ia32_reducess_mask:
7105 case X86::BI__builtin_ia32_rndscalesd_round_mask:
7106 case X86::BI__builtin_ia32_rndscaless_round_mask:
7107 case X86::BI__builtin_ia32_rndscalesh_round_mask:
7108 case X86::BI__builtin_ia32_reducesh_mask:
7109 i = 4; l = 0; u = 255;
7110 break;
7111 case X86::BI__builtin_ia32_cmpccxadd32:
7112 case X86::BI__builtin_ia32_cmpccxadd64:
7113 i = 3; l = 0; u = 15;
7114 break;
7115 }
7116
7117 // Note that we don't force a hard error on the range check here, allowing
7118 // template-generated or macro-generated dead code to potentially have out-of-
7119 // range values. These still need to code-generate, but they don't necessarily
7120 // need to make sense. We use a warning that defaults to an error.
7121 return SemaBuiltinConstantArgRange(TheCall, i, l, u, /*RangeIsError*/ false);
7122}
7123
7124 /// Given a FunctionDecl's FormatAttr, attempts to populate the
7125 /// FormatStringInfo parameter with the FormatAttr's correct format_idx and
7126 /// firstDataArg. Returns true when the format fits the function and the
7127 /// FormatStringInfo has been populated.
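/// For example, __attribute__((format(printf, 2, 3))) on a free function yields
/// FormatIdx == 1 and FirstDataArg == 2 (both zero-based).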
7128bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
7129 bool IsVariadic, FormatStringInfo *FSI) {
7130 if (Format->getFirstArg() == 0)
7131 FSI->ArgPassingKind = FAPK_VAList;
7132 else if (IsVariadic)
7133 FSI->ArgPassingKind = FAPK_Variadic;
7134 else
7135 FSI->ArgPassingKind = FAPK_Fixed;
7136 FSI->FormatIdx = Format->getFormatIdx() - 1;
7137 FSI->FirstDataArg =
7138 FSI->ArgPassingKind == FAPK_VAList ? 0 : Format->getFirstArg() - 1;
7139
7140 // The way the format attribute works in GCC, the implicit this argument
7141 // of member functions is counted. However, it doesn't appear in our own
7142 // lists, so decrement format_idx in that case.
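// For example, format(printf, 2, 3) on a non-static member function refers to
// the first explicit parameter, because GCC counts the implicit this as
// argument 1.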
7143 if (IsCXXMember) {
7144 if (FSI->FormatIdx == 0)
7145 return false;
7146 --FSI->FormatIdx;
7147 if (FSI->FirstDataArg != 0)
7148 --FSI->FirstDataArg;
7149 }
7150 return true;
7151}
7152
7153 /// Checks if the given expression evaluates to null.
7154///
7155/// Returns true if the value evaluates to null.
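/// For example, a literal (void *)0 argument evaluates to null, while an
/// expression whose type is annotated _Nonnull is assumed not to.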
7156static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
7157 // If the expression has non-null type, it doesn't evaluate to null.
7158 if (auto nullability = Expr->IgnoreImplicit()->getType()->getNullability()) {
7159 if (*nullability == NullabilityKind::NonNull)
7160 return false;
7161 }
7162
7163 // As a special case, transparent unions initialized with zero are
7164 // considered null for the purposes of the nonnull attribute.
7165 if (const RecordType *UT = Expr->getType()->getAsUnionType();
7166 UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
7167 if (const auto *CLE = dyn_cast<CompoundLiteralExpr>(Expr))
7168 if (const auto *ILE = dyn_cast<InitListExpr>(CLE->getInitializer()))
7169 Expr = ILE->getInit(0);
7170 }
7171
7172 bool Result;
7173 return (!Expr->isValueDependent() &&
7174 Expr->EvaluateAsBooleanCondition(Result, S.Context) &&
7175 !Result);
7176}
7177
7178static void CheckNonNullArgument(Sema &S,
7179 const Expr *ArgExpr,
7180 SourceLocation CallSiteLoc) {
7181 if (CheckNonNullExpr(S, ArgExpr))
7182 S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr,
7183 S.PDiag(diag::warn_null_arg)
7184 << ArgExpr->getSourceRange());
7185}
7186
7187bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) {
7188 FormatStringInfo FSI;
7189 if ((GetFormatStringType(Format) == FST_NSString) &&
7190 getFormatStringInfo(Format, /*IsCXXMember=*/false, /*IsVariadic=*/true, &FSI)) {
7191 Idx = FSI.FormatIdx;
7192 return true;
7193 }
7194 return false;
7195}
7196
7197 /// Diagnose use of the %s directive in an NSString that is being passed as
7198 /// a format string to a CF/NSString formatting function or method.
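/// For example, passing CFSTR("%s") as the format argument of a CF formatting
/// function such as CFStringCreateWithFormat() would trigger this diagnostic.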
7199static void
7200DiagnoseCStringFormatDirectiveInCFAPI(Sema &S,
7201 const NamedDecl *FDecl,
7202 Expr **Args,
7203 unsigned NumArgs) {
7204 unsigned Idx = 0;
7205 bool Format = false;
7206 ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily();
7207 if (SFFamily == ObjCStringFormatFamily::SFF_CFString) {
7208 Idx = 2;
7209 Format = true;
7210 }
7211 else
7212 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7213 if (S.GetFormatNSStringIdx(I, Idx)) {
7214 Format = true;
7215 break;
7216 }
7217 }
7218 if (!Format || NumArgs <= Idx)
7219 return;
7220 const Expr *FormatExpr = Args[Idx];
7221 if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr))
7222 FormatExpr = CSCE->getSubExpr();
7223 const StringLiteral *FormatString;
7224 if (const ObjCStringLiteral *OSL =
7225 dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts()))
7226 FormatString = OSL->getString();
7227 else
7228 FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts());
7229 if (!FormatString)
7230 return;
7231 if (S.FormatStringHasSArg(FormatString)) {
7232 S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
7233 << "%s" << 1 << 1;
7234 S.Diag(FDecl->getLocation(), diag::note_entity_declared_at)
7235 << FDecl->getDeclName();
7236 }
7237}
7238
7239/// Determine whether the given type has a non-null nullability annotation.
7240static bool isNonNullType(QualType type) {
7241 if (auto nullability = type->getNullability())
7242 return *nullability == NullabilityKind::NonNull;
7243
7244 return false;
7245}
7246
7247static void CheckNonNullArguments(Sema &S,
7248 const NamedDecl *FDecl,
7249 const FunctionProtoType *Proto,
7250 ArrayRef<const Expr *> Args,
7251 SourceLocation CallSiteLoc) {
7252 assert((FDecl || Proto) && "Need a function declaration or prototype");
7253
7254 // Already checked by constant evaluator.
7255 if (S.isConstantEvaluatedContext())
7256 return;
7257 // Check the attributes attached to the method/function itself.
7258 llvm::SmallBitVector NonNullArgs;
7259 if (FDecl) {
7260 // Handle the nonnull attribute on the function/method declaration itself.
7261 for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) {
7262 if (!NonNull->args_size()) {
7263 // Easy case: all pointer arguments are nonnull.
7264 for (const auto *Arg : Args)
7265 if (S.isValidPointerAttrType(Arg->getType()))
7266 CheckNonNullArgument(S, Arg, CallSiteLoc);
7267 return;
7268 }
7269
7270 for (const ParamIdx &Idx : NonNull->args()) {
7271 unsigned IdxAST = Idx.getASTIndex();
7272 if (IdxAST >= Args.size())
7273 continue;
7274 if (NonNullArgs.empty())
7275 NonNullArgs.resize(Args.size());
7276 NonNullArgs.set(IdxAST);
7277 }
7278 }
7279 }
7280
7281 if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) {
7282 // Handle the nonnull attribute on the parameters of the
7283 // function/method.
7284 ArrayRef<ParmVarDecl*> parms;
7285 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl))
7286 parms = FD->parameters();
7287 else
7288 parms = cast<ObjCMethodDecl>(FDecl)->parameters();
7289
7290 unsigned ParamIndex = 0;
7291 for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end();
7292 I != E; ++I, ++ParamIndex) {
7293 const ParmVarDecl *PVD = *I;
7294 if (PVD->hasAttr<NonNullAttr>() || isNonNullType(PVD->getType())) {
7295 if (NonNullArgs.empty())
7296 NonNullArgs.resize(Args.size());
7297
7298 NonNullArgs.set(ParamIndex);
7299 }
7300 }
7301 } else {
7302 // If we have a non-function, non-method declaration but no
7303 // function prototype, try to dig out the function prototype.
7304 if (!Proto) {
7305 if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) {
7306 QualType type = VD->getType().getNonReferenceType();
7307 if (auto pointerType = type->getAs<PointerType>())
7308 type = pointerType->getPointeeType();
7309 else if (auto blockType = type->getAs<BlockPointerType>())
7310 type = blockType->getPointeeType();
7311 // FIXME: data member pointers?
7312
7313 // Dig out the function prototype, if there is one.
7314 Proto = type->getAs<FunctionProtoType>();
7315 }
7316 }
7317
7318 // Fill in non-null argument information from the nullability
7319 // information on the parameter types (if we have them).
7320 if (Proto) {
7321 unsigned Index = 0;
7322 for (auto paramType : Proto->getParamTypes()) {
7323 if (isNonNullType(paramType)) {
7324 if (NonNullArgs.empty())
7325 NonNullArgs.resize(Args.size());
7326
7327 NonNullArgs.set(Index);
7328 }
7329
7330 ++Index;
7331 }
7332 }
7333 }
7334
7335 // Check for non-null arguments.
7336 for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size();
7337 ArgIndex != ArgIndexEnd; ++ArgIndex) {
7338 if (NonNullArgs[ArgIndex])
7339 CheckNonNullArgument(S, Args[ArgIndex], Args[ArgIndex]->getExprLoc());
7340 }
7341}
7342
7343 // 16-byte ByVal alignment not due to a vector member is not honoured by XL
7344 // on AIX. Emit a warning here so that users know they may be generating
7345 // binary-incompatible code.
7346 // Here we try to get information about the alignment of the struct member
7347 // from the struct passed to the caller function. We only warn when the struct
7348 // is passed byval, hence the series of checks and early returns if we are not
7349 // passing a struct byval.
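// For example, passing by value a struct that has a member declared with
// __attribute__((aligned(16))) triggers warn_not_xl_compatible.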
7350void Sema::checkAIXMemberAlignment(SourceLocation Loc, const Expr *Arg) {
7351 const auto *ICE = dyn_cast<ImplicitCastExpr>(Arg->IgnoreParens());
7352 if (!ICE)
7353 return;
7354
7355 const auto *DR = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
7356 if (!DR)
7357 return;
7358
7359 const auto *PD = dyn_cast<ParmVarDecl>(DR->getDecl());
7360 if (!PD || !PD->getType()->isRecordType())
7361 return;
7362
7363 QualType ArgType = Arg->getType();
7364 for (const FieldDecl *FD :
7365 ArgType->castAs<RecordType>()->getDecl()->fields()) {
7366 if (const auto *AA = FD->getAttr<AlignedAttr>()) {
7367 CharUnits Alignment =
7368 Context.toCharUnitsFromBits(AA->getAlignment(Context));
7369 if (Alignment.getQuantity() == 16) {
7370 Diag(FD->getLocation(), diag::warn_not_xl_compatible) << FD;
7371 Diag(Loc, diag::note_misaligned_member_used_here) << PD;
7372 }
7373 }
7374 }
7375}
7376
7377/// Warn if a pointer or reference argument passed to a function points to an
7378/// object that is less aligned than the parameter. This can happen when
7379/// creating a typedef with a lower alignment than the original type and then
7380/// calling functions defined in terms of the original type.
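/// For example, given 'typedef double Unaligned __attribute__((aligned(4)));'
/// (an illustrative typedef), passing an Unaligned * where a double * is
/// expected warns that the object may be less aligned than the callee assumes.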
7381void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
7382 StringRef ParamName, QualType ArgTy,
7383 QualType ParamTy) {
7384
7385 // If a function accepts a pointer or reference type
7386 if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
7387 return;
7388
7389 // If the parameter is a pointer type, get the pointee type for the
7390 // argument too. If the parameter is a reference type, don't try to get
7391 // the pointee type for the argument.
7392 if (ParamTy->isPointerType())
7393 ArgTy = ArgTy->getPointeeType();
7394
7395 // Remove reference or pointer
7396 ParamTy = ParamTy->getPointeeType();
7397
7398 // Find expected alignment, and the actual alignment of the passed object.
7399 // getTypeAlignInChars requires complete types
7400 if (ArgTy.isNull() || ParamTy->isDependentType() ||
7401 ParamTy->isIncompleteType() || ArgTy->isIncompleteType() ||
7402 ParamTy->isUndeducedType() || ArgTy->isUndeducedType())
7403 return;
7404
7405 CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
7406 CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
7407
7408 // If the argument is less aligned than the parameter, there is a
7409 // potential alignment issue.
7410 if (ArgAlign < ParamAlign)
7411 Diag(Loc, diag::warn_param_mismatched_alignment)
7412 << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
7413 << ParamName << (FDecl != nullptr) << FDecl;
7414}
7415
7416/// Handles the checks for format strings, non-POD arguments to vararg
7417/// functions, NULL arguments passed to non-NULL parameters, diagnose_if
7418/// attributes and AArch64 SME attributes.
7419void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
7420 const Expr *ThisArg, ArrayRef<const Expr *> Args,
7421 bool IsMemberFunction, SourceLocation Loc,
7422 SourceRange Range, VariadicCallType CallType) {
7423 // FIXME: We should check as much as we can in the template definition.
7424 if (CurContext->isDependentContext())
7425 return;
7426
7427 // Printf and scanf checking.
7428 llvm::SmallBitVector CheckedVarArgs;
7429 if (FDecl) {
7430 for (const auto *I : FDecl->specific_attrs<FormatAttr>()) {
7431 // Only create vector if there are format attributes.
7432 CheckedVarArgs.resize(Args.size());
7433
7434 CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range,
7435 CheckedVarArgs);
7436 }
7437 }
7438
7439 // Diagnose non-POD arguments passed through the variadic part of the call
7440 // that weren't already caught by the format string checks above.
7441 auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
7442 if (CallType != VariadicDoesNotApply &&
7443 (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
7444 unsigned NumParams = Proto ? Proto->getNumParams()
7445 : FDecl && isa<FunctionDecl>(FDecl)
7446 ? cast<FunctionDecl>(FDecl)->getNumParams()
7447 : FDecl && isa<ObjCMethodDecl>(FDecl)
7448 ? cast<ObjCMethodDecl>(FDecl)->param_size()
7449 : 0;
7450
7451 for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) {
7452 // Args[ArgIdx] can be null in malformed code.
7453 if (const Expr *Arg = Args[ArgIdx]) {
7454 if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx])
7455 checkVariadicArgument(Arg, CallType);
7456 }
7457 }
7458 }
7459
7460 if (FDecl || Proto) {
7461 CheckNonNullArguments(*this, FDecl, Proto, Args, Loc);
7462
7463 // Type safety checking.
7464 if (FDecl) {
7465 for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>())
7466 CheckArgumentWithTypeTag(I, Args, Loc);
7467 }
7468 }
7469
7470 // Check that passed arguments match the alignment of original arguments.
7471 // Try to get the missing prototype from the declaration.
7472 if (!Proto && FDecl) {
7473 const auto *FT = FDecl->getFunctionType();
7474 if (isa_and_nonnull<FunctionProtoType>(FT))
7475 Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
7476 }
7477 if (Proto) {
7478 // For variadic functions, we may have more args than parameters.
7479 // For some K&R functions, we may have fewer args than parameters.
7480 const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
7481 for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
7482 // Args[ArgIdx] can be null in malformed code.
7483 if (const Expr *Arg = Args[ArgIdx]) {
7484 if (Arg->containsErrors())
7485 continue;
7486
7487 if (Context.getTargetInfo().getTriple().isOSAIX() && FDecl && Arg &&
7488 FDecl->hasLinkage() &&
7489 FDecl->getFormalLinkage() != Linkage::Internal &&
7490 CallType == VariadicDoesNotApply)
7491 checkAIXMemberAlignment((Arg->getExprLoc()), Arg);
7492
7493 QualType ParamTy = Proto->getParamType(ArgIdx);
7494 QualType ArgTy = Arg->getType();
7495 CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
7496 ArgTy, ParamTy);
7497 }
7498 }
7499
7500 // If the callee has an AArch64 SME attribute to indicate that it is an
7501 // __arm_streaming function, then the caller requires SME to be available.
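// For example, calling a function declared 'void f(void) __arm_streaming;'
// from a caller compiled without the "sme" target feature is diagnosed here.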
7502 FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
7503 if (ExtInfo.AArch64SMEAttributes & FunctionType::SME_PStateSMEnabledMask) {
7504 if (auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
7505 llvm::StringMap<bool> CallerFeatureMap;
7506 Context.getFunctionFeatureMap(CallerFeatureMap, CallerFD);
7507 if (!CallerFeatureMap.contains("sme"))
7508 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7509 } else if (!Context.getTargetInfo().hasFeature("sme")) {
7510 Diag(Loc, diag::err_sme_call_in_non_sme_target);
7511 }
7512 }
7513
7514 FunctionType::ArmStateValue CalleeArmZAState =
7515 FunctionType::getArmZAState(ExtInfo.AArch64SMEAttributes);
7516 FunctionType::ArmStateValue CalleeArmZT0State =
7517 FunctionType::getArmZT0State(ExtInfo.AArch64SMEAttributes);
7518 if (CalleeArmZAState != FunctionType::ARM_None ||
7519 CalleeArmZT0State != FunctionType::ARM_None) {
7520 bool CallerHasZAState = false;
7521 bool CallerHasZT0State = false;
7522 if (const auto *CallerFD = dyn_cast<FunctionDecl>(CurContext)) {
7523 auto *Attr = CallerFD->getAttr<ArmNewAttr>();
7524 if (Attr && Attr->isNewZA())
7525 CallerHasZAState = true;
7526 if (Attr && Attr->isNewZT0())
7527 CallerHasZT0State = true;
7528 if (const auto *FPT = CallerFD->getType()->getAs<FunctionProtoType>()) {
7529 CallerHasZAState |=
7530 FunctionType::getArmZAState(
7531 FPT->getExtProtoInfo().AArch64SMEAttributes) !=
7532 FunctionType::ARM_None;
7533 CallerHasZT0State |=
7534 FunctionType::getArmZT0State(
7535 FPT->getExtProtoInfo().AArch64SMEAttributes) !=
7536 FunctionType::ARM_None;
7537 }
7538 }
7539
7540 if (CalleeArmZAState != FunctionType::ARM_None && !CallerHasZAState)
7541 Diag(Loc, diag::err_sme_za_call_no_za_state);
7542
7543 if (CalleeArmZT0State != FunctionType::ARM_None && !CallerHasZT0State)
7544 Diag(Loc, diag::err_sme_zt0_call_no_zt0_state);
7545
7546 if (CallerHasZAState && CalleeArmZAState == FunctionType::ARM_None &&
7547 CalleeArmZT0State != FunctionType::ARM_None) {
7548 Diag(Loc, diag::err_sme_unimplemented_za_save_restore);
7549 Diag(Loc, diag::note_sme_use_preserves_za);
7550 }
7551 }
7552 }
7553
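// If the callee has alloc_align, e.g. a (hypothetical) declaration such as
//   void *my_alloc(size_t n, size_t align) __attribute__((alloc_align(2)));
// check that a constant alignment argument is a power of two and does not
// exceed the maximum supported alignment.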
7554 if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
7555 auto *AA = FDecl->getAttr<AllocAlignAttr>();
7556 const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
7557 if (!Arg->isValueDependent()) {
7558 Expr::EvalResult Align;
7559 if (Arg->EvaluateAsInt(Align, Context)) {
7560 const llvm::APSInt &I = Align.Val.getInt();
7561 if (!I.isPowerOf2())
7562 Diag(Arg->getExprLoc(), diag::warn_alignment_not_power_of_two)
7563 << Arg->getSourceRange();
7564
7565 if (I > Sema::MaximumAlignment)
7566 Diag(Arg->getExprLoc(), diag::warn_assume_aligned_too_great)
7567 << Arg->getSourceRange() << Sema::MaximumAlignment;
7568 }
7569 }
7570 }
7571
7572 if (FD)
7573 diagnoseArgDependentDiagnoseIfAttrs(FD, ThisArg, Args, Loc);
7574}
7575
7576/// CheckConstructorCall - Check a constructor call for correctness and safety
7577/// properties not enforced by the C type system.
7578void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
7579 ArrayRef<const Expr *> Args,
7580 const FunctionProtoType *Proto,
7581 SourceLocation Loc) {
7582 VariadicCallType CallType =
7583 Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
7584
7585 auto *Ctor = cast<CXXConstructorDecl>(FDecl);
7586 CheckArgAlignment(
7587 Loc, FDecl, "'this'", Context.getPointerType(ThisType),
7588 Context.getPointerType(Ctor->getFunctionObjectParameterType()));
7589
7590 checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
7591 Loc, SourceRange(), CallType);
7592}
7593
7594/// CheckFunctionCall - Check a direct function call for various correctness
7595/// and safety properties not strictly enforced by the C type system.
7596bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
7597 const FunctionProtoType *Proto) {
7598 bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) &&
7599 isa<CXXMethodDecl>(FDecl);
7600 bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) ||
7601 IsMemberOperatorCall;
7602 VariadicCallType CallType = getVariadicCallType(FDecl, Proto,
7603 TheCall->getCallee());
7604 Expr** Args = TheCall->getArgs();
7605 unsigned NumArgs = TheCall->getNumArgs();
7606
7607 Expr *ImplicitThis = nullptr;
7608 if (IsMemberOperatorCall && !FDecl->hasCXXExplicitFunctionObjectParameter()) {
7609 // If this is a call to a member operator, hide the first
7610 // argument from checkCall.
7611 // FIXME: Our choice of AST representation here is less than ideal.
7612 ImplicitThis = Args[0];
7613 ++Args;
7614 --NumArgs;
7615 } else if (IsMemberFunction && !FDecl->isStatic() &&
7616 !FDecl->hasCXXExplicitFunctionObjectParameter())
7617 ImplicitThis =
7618 cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
7619
7620 if (ImplicitThis) {
7621 // ImplicitThis may or may not be a pointer, depending on whether . or -> is
7622 // used.
7623 QualType ThisType = ImplicitThis->getType();
7624 if (!ThisType->isPointerType()) {
7625 assert(!ThisType->isReferenceType());
7626 ThisType = Context.getPointerType(ThisType);
7627 }
7628
7629 QualType ThisTypeFromDecl = Context.getPointerType(
7630 cast<CXXMethodDecl>(FDecl)->getFunctionObjectParameterType());
7631
7632 CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
7633 ThisTypeFromDecl);
7634 }
7635
7636 checkCall(FDecl, Proto, ImplicitThis, llvm::ArrayRef(Args, NumArgs),
7637 IsMemberFunction, TheCall->getRParenLoc(),
7638 TheCall->getCallee()->getSourceRange(), CallType);
7639
7640 IdentifierInfo *FnInfo = FDecl->getIdentifier();
7641 // None of the checks below are needed for functions that don't have
7642 // simple names (e.g., C++ conversion functions).
7643 if (!FnInfo)
7644 return false;
7645
7646 // Enforce TCB except for builtin calls, which are always allowed.
7647 if (FDecl->getBuiltinID() == 0)
7648 CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);
7649
7650 CheckAbsoluteValueFunction(TheCall, FDecl);
7651 CheckMaxUnsignedZero(TheCall, FDecl);
7652 CheckInfNaNFunction(TheCall, FDecl);
7653
7654 if (getLangOpts().ObjC)
7655 DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
7656
7657 unsigned CMId = FDecl->getMemoryFunctionKind();
7658
7659 // Handle memory setting and copying functions.
7660 switch (CMId) {
7661 case 0:
7662 return false;
7663 case Builtin::BIstrlcpy: // fallthrough
7664 case Builtin::BIstrlcat:
7665 CheckStrlcpycatArguments(TheCall, FnInfo);
7666 break;
7667 case Builtin::BIstrncat:
7668 CheckStrncatArguments(TheCall, FnInfo);
7669 break;
7670 case Builtin::BIfree:
7671 CheckFreeArguments(TheCall);
7672 break;
7673 default:
7674 CheckMemaccessArguments(TheCall, CMId, FnInfo);
7675 }
7676
7677 return false;
7678}
7679
7680bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
7681 ArrayRef<const Expr *> Args) {
7682 VariadicCallType CallType =
7683 Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply;
7684
7685 checkCall(Method, nullptr, /*ThisArg=*/nullptr, Args,
7686 /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
7687 CallType);
7688
7689 CheckTCBEnforcement(lbrac, Method);
7690
7691 return false;
7692}
7693
7694bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
7695 const FunctionProtoType *Proto) {
7696 QualType Ty;
7697 if (const auto *V = dyn_cast<VarDecl>(NDecl))
7698 Ty = V->getType().getNonReferenceType();
7699 else if (const auto *F = dyn_cast<FieldDecl>(NDecl))
7700 Ty = F->getType().getNonReferenceType();
7701 else
7702 return false;
7703
7704 if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() &&
7705 !Ty->isFunctionProtoType())
7706 return false;
7707
7708 VariadicCallType CallType;
7709 if (!Proto || !Proto->isVariadic()) {
7710 CallType = VariadicDoesNotApply;
7711 } else if (Ty->isBlockPointerType()) {
7712 CallType = VariadicBlock;
7713 } else { // Ty->isFunctionPointerType()
7714 CallType = VariadicFunction;
7715 }
7716
7717 checkCall(NDecl, Proto, /*ThisArg=*/nullptr,
7718 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
7719 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
7720 TheCall->getCallee()->getSourceRange(), CallType);
7721
7722 return false;
7723}
7724
7725/// Checks function calls when a FunctionDecl or a NamedDecl is not available,
7726/// such as function pointers returned from functions.
7727bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
7728 VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
7729 TheCall->getCallee());
7730 checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr,
7731 llvm::ArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
7732 /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
7733 TheCall->getCallee()->getSourceRange(), CallType);
7734
7735 return false;
7736}
7737
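// Returns true if the C ABI memory ordering is valid for the given atomic
// operation; e.g. __ATOMIC_RELEASE is rejected for loads and __ATOMIC_ACQUIRE
// is rejected for stores.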
7738static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
7739 if (!llvm::isValidAtomicOrderingCABI(Ordering))
7740 return false;
7741
7742 auto OrderingCABI = (llvm::AtomicOrderingCABI)Ordering;
7743 switch (Op) {
7744 case AtomicExpr::AO__c11_atomic_init:
7745 case AtomicExpr::AO__opencl_atomic_init:
7746 llvm_unreachable("There is no ordering argument for an init");
7747
7748 case AtomicExpr::AO__c11_atomic_load:
7749 case AtomicExpr::AO__opencl_atomic_load:
7750 case AtomicExpr::AO__hip_atomic_load:
7751 case AtomicExpr::AO__atomic_load_n:
7752 case AtomicExpr::AO__atomic_load:
7753 case AtomicExpr::AO__scoped_atomic_load_n:
7754 case AtomicExpr::AO__scoped_atomic_load:
7755 return OrderingCABI != llvm::AtomicOrderingCABI::release &&
7756 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
7757
7758 case AtomicExpr::AO__c11_atomic_store:
7759 case AtomicExpr::AO__opencl_atomic_store:
7760 case AtomicExpr::AO__hip_atomic_store:
7761 case AtomicExpr::AO__atomic_store:
7762 case AtomicExpr::AO__atomic_store_n:
7763 case AtomicExpr::AO__scoped_atomic_store:
7764 case AtomicExpr::AO__scoped_atomic_store_n:
7765 return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
7766 OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
7767 OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
7768
7769 default:
7770 return true;
7771 }
7772}
7773
7774ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
7775 AtomicExpr::AtomicOp Op) {
7776 CallExpr *TheCall = cast<CallExpr>(Val: TheCallResult.get());
7777  DeclRefExpr *DRE = cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
7778 MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()};
7779 return BuildAtomicExpr(CallRange: {TheCall->getBeginLoc(), TheCall->getEndLoc()},
7780 ExprRange: DRE->getSourceRange(), RParenLoc: TheCall->getRParenLoc(), Args,
7781 Op);
7782}
7783
7784ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
7785 SourceLocation RParenLoc, MultiExprArg Args,
7786 AtomicExpr::AtomicOp Op,
7787 AtomicArgumentOrder ArgOrder) {
7788 // All the non-OpenCL operations take one of the following forms.
7789 // The OpenCL operations take the __c11 forms with one extra argument for
7790 // synchronization scope.
7791 enum {
7792 // C __c11_atomic_init(A *, C)
7793 Init,
7794
7795 // C __c11_atomic_load(A *, int)
7796 Load,
7797
7798 // void __atomic_load(A *, CP, int)
7799 LoadCopy,
7800
7801 // void __atomic_store(A *, CP, int)
7802 Copy,
7803
7804 // C __c11_atomic_add(A *, M, int)
7805 Arithmetic,
7806
7807 // C __atomic_exchange_n(A *, CP, int)
7808 Xchg,
7809
7810 // void __atomic_exchange(A *, C *, CP, int)
7811 GNUXchg,
7812
7813 // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
7814 C11CmpXchg,
7815
7816 // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
7817 GNUCmpXchg
7818 } Form = Init;
7819
7820 const unsigned NumForm = GNUCmpXchg + 1;
7821 const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
7822 const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
7823 // where:
7824 // C is an appropriate type,
7825 // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
7826 // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
7827 // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
7828 // the int parameters are for orderings.
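  // For illustration (user-level calls, assuming '_Atomic int A; int I, Old;'):
  //   __c11_atomic_load(&A, __ATOMIC_SEQ_CST);           // Load
  //   __atomic_load(&I, &Old, __ATOMIC_SEQ_CST);         // LoadCopy
  //   __c11_atomic_fetch_add(&A, 1, __ATOMIC_SEQ_CST);   // Arithmetic
  //   __atomic_exchange_n(&I, 42, __ATOMIC_SEQ_CST);     // Xchg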
7829
7830 static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm
7831 && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm,
7832 "need to update code for modified forms");
7833 static_assert(AtomicExpr::AO__atomic_add_fetch == 0 &&
7834 AtomicExpr::AO__atomic_xor_fetch + 1 ==
7835 AtomicExpr::AO__c11_atomic_compare_exchange_strong,
7836 "need to update code for modified C11 atomics");
7837 bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_compare_exchange_strong &&
7838 Op <= AtomicExpr::AO__opencl_atomic_store;
7839 bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_compare_exchange_strong &&
7840 Op <= AtomicExpr::AO__hip_atomic_store;
7841 bool IsScoped = Op >= AtomicExpr::AO__scoped_atomic_add_fetch &&
7842 Op <= AtomicExpr::AO__scoped_atomic_xor_fetch;
7843 bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_compare_exchange_strong &&
7844 Op <= AtomicExpr::AO__c11_atomic_store) ||
7845 IsOpenCL;
7846 bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
7847 Op == AtomicExpr::AO__atomic_store_n ||
7848 Op == AtomicExpr::AO__atomic_exchange_n ||
7849 Op == AtomicExpr::AO__atomic_compare_exchange_n ||
7850 Op == AtomicExpr::AO__scoped_atomic_load_n ||
7851 Op == AtomicExpr::AO__scoped_atomic_store_n ||
7852 Op == AtomicExpr::AO__scoped_atomic_exchange_n ||
7853 Op == AtomicExpr::AO__scoped_atomic_compare_exchange_n;
7854 // Bit mask for extra allowed value types other than integers for atomic
7855 // arithmetic operations. Add/sub allow pointer and floating point. Min/max
7856 // allow floating point.
7857 enum ArithOpExtraValueType {
7858 AOEVT_None = 0,
7859 AOEVT_Pointer = 1,
7860 AOEVT_FP = 2,
7861 };
7862 unsigned ArithAllows = AOEVT_None;
7863
7864 switch (Op) {
7865 case AtomicExpr::AO__c11_atomic_init:
7866 case AtomicExpr::AO__opencl_atomic_init:
7867 Form = Init;
7868 break;
7869
7870 case AtomicExpr::AO__c11_atomic_load:
7871 case AtomicExpr::AO__opencl_atomic_load:
7872 case AtomicExpr::AO__hip_atomic_load:
7873 case AtomicExpr::AO__atomic_load_n:
7874 case AtomicExpr::AO__scoped_atomic_load_n:
7875 Form = Load;
7876 break;
7877
7878 case AtomicExpr::AO__atomic_load:
7879 case AtomicExpr::AO__scoped_atomic_load:
7880 Form = LoadCopy;
7881 break;
7882
7883 case AtomicExpr::AO__c11_atomic_store:
7884 case AtomicExpr::AO__opencl_atomic_store:
7885 case AtomicExpr::AO__hip_atomic_store:
7886 case AtomicExpr::AO__atomic_store:
7887 case AtomicExpr::AO__atomic_store_n:
7888 case AtomicExpr::AO__scoped_atomic_store:
7889 case AtomicExpr::AO__scoped_atomic_store_n:
7890 Form = Copy;
7891 break;
7892 case AtomicExpr::AO__atomic_fetch_add:
7893 case AtomicExpr::AO__atomic_fetch_sub:
7894 case AtomicExpr::AO__atomic_add_fetch:
7895 case AtomicExpr::AO__atomic_sub_fetch:
7896 case AtomicExpr::AO__scoped_atomic_fetch_add:
7897 case AtomicExpr::AO__scoped_atomic_fetch_sub:
7898 case AtomicExpr::AO__scoped_atomic_add_fetch:
7899 case AtomicExpr::AO__scoped_atomic_sub_fetch:
7900 case AtomicExpr::AO__c11_atomic_fetch_add:
7901 case AtomicExpr::AO__c11_atomic_fetch_sub:
7902 case AtomicExpr::AO__opencl_atomic_fetch_add:
7903 case AtomicExpr::AO__opencl_atomic_fetch_sub:
7904 case AtomicExpr::AO__hip_atomic_fetch_add:
7905 case AtomicExpr::AO__hip_atomic_fetch_sub:
7906 ArithAllows = AOEVT_Pointer | AOEVT_FP;
7907 Form = Arithmetic;
7908 break;
7909 case AtomicExpr::AO__atomic_fetch_max:
7910 case AtomicExpr::AO__atomic_fetch_min:
7911 case AtomicExpr::AO__atomic_max_fetch:
7912 case AtomicExpr::AO__atomic_min_fetch:
7913 case AtomicExpr::AO__scoped_atomic_fetch_max:
7914 case AtomicExpr::AO__scoped_atomic_fetch_min:
7915 case AtomicExpr::AO__scoped_atomic_max_fetch:
7916 case AtomicExpr::AO__scoped_atomic_min_fetch:
7917 case AtomicExpr::AO__c11_atomic_fetch_max:
7918 case AtomicExpr::AO__c11_atomic_fetch_min:
7919 case AtomicExpr::AO__opencl_atomic_fetch_max:
7920 case AtomicExpr::AO__opencl_atomic_fetch_min:
7921 case AtomicExpr::AO__hip_atomic_fetch_max:
7922 case AtomicExpr::AO__hip_atomic_fetch_min:
7923 ArithAllows = AOEVT_FP;
7924 Form = Arithmetic;
7925 break;
7926 case AtomicExpr::AO__c11_atomic_fetch_and:
7927 case AtomicExpr::AO__c11_atomic_fetch_or:
7928 case AtomicExpr::AO__c11_atomic_fetch_xor:
7929 case AtomicExpr::AO__hip_atomic_fetch_and:
7930 case AtomicExpr::AO__hip_atomic_fetch_or:
7931 case AtomicExpr::AO__hip_atomic_fetch_xor:
7932 case AtomicExpr::AO__c11_atomic_fetch_nand:
7933 case AtomicExpr::AO__opencl_atomic_fetch_and:
7934 case AtomicExpr::AO__opencl_atomic_fetch_or:
7935 case AtomicExpr::AO__opencl_atomic_fetch_xor:
7936 case AtomicExpr::AO__atomic_fetch_and:
7937 case AtomicExpr::AO__atomic_fetch_or:
7938 case AtomicExpr::AO__atomic_fetch_xor:
7939 case AtomicExpr::AO__atomic_fetch_nand:
7940 case AtomicExpr::AO__atomic_and_fetch:
7941 case AtomicExpr::AO__atomic_or_fetch:
7942 case AtomicExpr::AO__atomic_xor_fetch:
7943 case AtomicExpr::AO__atomic_nand_fetch:
7944 case AtomicExpr::AO__scoped_atomic_fetch_and:
7945 case AtomicExpr::AO__scoped_atomic_fetch_or:
7946 case AtomicExpr::AO__scoped_atomic_fetch_xor:
7947 case AtomicExpr::AO__scoped_atomic_fetch_nand:
7948 case AtomicExpr::AO__scoped_atomic_and_fetch:
7949 case AtomicExpr::AO__scoped_atomic_or_fetch:
7950 case AtomicExpr::AO__scoped_atomic_xor_fetch:
7951 case AtomicExpr::AO__scoped_atomic_nand_fetch:
7952 Form = Arithmetic;
7953 break;
7954
7955 case AtomicExpr::AO__c11_atomic_exchange:
7956 case AtomicExpr::AO__hip_atomic_exchange:
7957 case AtomicExpr::AO__opencl_atomic_exchange:
7958 case AtomicExpr::AO__atomic_exchange_n:
7959 case AtomicExpr::AO__scoped_atomic_exchange_n:
7960 Form = Xchg;
7961 break;
7962
7963 case AtomicExpr::AO__atomic_exchange:
7964 case AtomicExpr::AO__scoped_atomic_exchange:
7965 Form = GNUXchg;
7966 break;
7967
7968 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
7969 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
7970 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
7971 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
7972 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
7973 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
7974 Form = C11CmpXchg;
7975 break;
7976
7977 case AtomicExpr::AO__atomic_compare_exchange:
7978 case AtomicExpr::AO__atomic_compare_exchange_n:
7979 case AtomicExpr::AO__scoped_atomic_compare_exchange:
7980 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
7981 Form = GNUCmpXchg;
7982 break;
7983 }
7984
7985 unsigned AdjustedNumArgs = NumArgs[Form];
7986 if ((IsOpenCL || IsHIP || IsScoped) &&
7987 Op != AtomicExpr::AO__opencl_atomic_init)
7988 ++AdjustedNumArgs;
7989 // Check we have the right number of arguments.
7990 if (Args.size() < AdjustedNumArgs) {
7991 Diag(CallRange.getEnd(), diag::err_typecheck_call_too_few_args)
7992 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
7993 << /*is non object*/ 0 << ExprRange;
7994 return ExprError();
7995 } else if (Args.size() > AdjustedNumArgs) {
7996 Diag(Args[AdjustedNumArgs]->getBeginLoc(),
7997 diag::err_typecheck_call_too_many_args)
7998 << 0 << AdjustedNumArgs << static_cast<unsigned>(Args.size())
7999 << /*is non object*/ 0 << ExprRange;
8000 return ExprError();
8001 }
8002
8003 // Inspect the first argument of the atomic operation.
8004 Expr *Ptr = Args[0];
8005 ExprResult ConvertedPtr = DefaultFunctionArrayLvalueConversion(E: Ptr);
8006 if (ConvertedPtr.isInvalid())
8007 return ExprError();
8008
8009 Ptr = ConvertedPtr.get();
8010 const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
8011 if (!pointerType) {
8012 Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
8013 << Ptr->getType() << Ptr->getSourceRange();
8014 return ExprError();
8015 }
8016
8017 // For a __c11 builtin, this should be a pointer to an _Atomic type.
8018 QualType AtomTy = pointerType->getPointeeType(); // 'A'
8019 QualType ValType = AtomTy; // 'C'
8020 if (IsC11) {
8021 if (!AtomTy->isAtomicType()) {
8022 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic)
8023 << Ptr->getType() << Ptr->getSourceRange();
8024 return ExprError();
8025 }
8026 if ((Form != Load && Form != LoadCopy && AtomTy.isConstQualified()) ||
8027 AtomTy.getAddressSpace() == LangAS::opencl_constant) {
8028 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_atomic)
8029 << (AtomTy.isConstQualified() ? 0 : 1) << Ptr->getType()
8030 << Ptr->getSourceRange();
8031 return ExprError();
8032 }
8033 ValType = AtomTy->castAs<AtomicType>()->getValueType();
8034 } else if (Form != Load && Form != LoadCopy) {
8035 if (ValType.isConstQualified()) {
8036 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_non_const_pointer)
8037 << Ptr->getType() << Ptr->getSourceRange();
8038 return ExprError();
8039 }
8040 }
8041
8042 // For an arithmetic operation, the implied arithmetic must be well-formed.
8043 if (Form == Arithmetic) {
8044 // GCC does not enforce these rules for GNU atomics, but we do to help catch
8045 // trivial type errors.
8046 auto IsAllowedValueType = [&](QualType ValType,
8047 unsigned AllowedType) -> bool {
8048 if (ValType->isIntegerType())
8049 return true;
8050 if (ValType->isPointerType())
8051 return AllowedType & AOEVT_Pointer;
8052 if (!(ValType->isFloatingType() && (AllowedType & AOEVT_FP)))
8053 return false;
8054 // LLVM Parser does not allow atomicrmw with x86_fp80 type.
8055 if (ValType->isSpecificBuiltinType(K: BuiltinType::LongDouble) &&
8056 &Context.getTargetInfo().getLongDoubleFormat() ==
8057 &llvm::APFloat::x87DoubleExtended())
8058 return false;
8059 return true;
8060 };
8061 if (!IsAllowedValueType(ValType, ArithAllows)) {
8062 auto DID = ArithAllows & AOEVT_FP
8063 ? (ArithAllows & AOEVT_Pointer
8064 ? diag::err_atomic_op_needs_atomic_int_ptr_or_fp
8065 : diag::err_atomic_op_needs_atomic_int_or_fp)
8066 : diag::err_atomic_op_needs_atomic_int;
8067 Diag(ExprRange.getBegin(), DID)
8068 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
8069 return ExprError();
8070 }
8071 if (IsC11 && ValType->isPointerType() &&
8072 RequireCompleteType(Ptr->getBeginLoc(), ValType->getPointeeType(),
8073 diag::err_incomplete_type)) {
8074 return ExprError();
8075 }
8076 } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
8077 // For __atomic_*_n operations, the value type must be a scalar integral or
8078 // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
8079 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
8080 << IsC11 << Ptr->getType() << Ptr->getSourceRange();
8081 return ExprError();
8082 }
8083
8084 if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
8085 !AtomTy->isScalarType()) {
8086 // For GNU atomics, require a trivially-copyable type. This is not part of
8087    // the GNU atomics specification, but we enforce it for consistency with the
8088    // other atomic builtins, which generally require a trivially-copyable type
8089    // because atomic operations simply copy bits.
8090 Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
8091 << Ptr->getType() << Ptr->getSourceRange();
8092 return ExprError();
8093 }
8094
8095 switch (ValType.getObjCLifetime()) {
8096 case Qualifiers::OCL_None:
8097 case Qualifiers::OCL_ExplicitNone:
8098 // okay
8099 break;
8100
8101 case Qualifiers::OCL_Weak:
8102 case Qualifiers::OCL_Strong:
8103 case Qualifiers::OCL_Autoreleasing:
8104 // FIXME: Can this happen? By this point, ValType should be known
8105 // to be trivially copyable.
8106 Diag(ExprRange.getBegin(), diag::err_arc_atomic_ownership)
8107 << ValType << Ptr->getSourceRange();
8108 return ExprError();
8109 }
8110
8111 // All atomic operations have an overload which takes a pointer to a volatile
8112 // 'A'. We shouldn't let the volatile-ness of the pointee-type inject itself
8113 // into the result or the other operands. Similarly atomic_load takes a
8114 // pointer to a const 'A'.
8115 ValType.removeLocalVolatile();
8116 ValType.removeLocalConst();
8117 QualType ResultType = ValType;
8118 if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
8119 Form == Init)
8120 ResultType = Context.VoidTy;
8121 else if (Form == C11CmpXchg || Form == GNUCmpXchg)
8122 ResultType = Context.BoolTy;
8123
8124 // The type of a parameter passed 'by value'. In the GNU atomics, such
8125 // arguments are actually passed as pointers.
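  // For illustration: given '_Atomic int A; int I, V;', __c11_atomic_store(&A,
  // V, order) and __atomic_store_n(&I, V, order) take V by value, whereas
  // __atomic_store(&I, &V, order) takes it by address, so in that case
  // ByValType becomes the (pointer) type of the first argument.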
8126 QualType ByValType = ValType; // 'CP'
8127 bool IsPassedByAddress = false;
8128 if (!IsC11 && !IsHIP && !IsN) {
8129 ByValType = Ptr->getType();
8130 IsPassedByAddress = true;
8131 }
8132
8133 SmallVector<Expr *, 5> APIOrderedArgs;
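  // For illustration, for __atomic_compare_exchange the user-facing (API)
  // argument order is (ptr, expected, desired, weak, success_order,
  // failure_order), while the AST order used by AtomicExpr is (ptr,
  // success_order, expected, failure_order, desired, weak).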
8134 if (ArgOrder == Sema::AtomicArgumentOrder::AST) {
8135 APIOrderedArgs.push_back(Elt: Args[0]);
8136 switch (Form) {
8137 case Init:
8138 case Load:
8139 APIOrderedArgs.push_back(Elt: Args[1]); // Val1/Order
8140 break;
8141 case LoadCopy:
8142 case Copy:
8143 case Arithmetic:
8144 case Xchg:
8145 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8146 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8147 break;
8148 case GNUXchg:
8149 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8150 APIOrderedArgs.push_back(Elt: Args[3]); // Val2
8151 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8152 break;
8153 case C11CmpXchg:
8154 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8155 APIOrderedArgs.push_back(Elt: Args[4]); // Val2
8156 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8157 APIOrderedArgs.push_back(Elt: Args[3]); // OrderFail
8158 break;
8159 case GNUCmpXchg:
8160 APIOrderedArgs.push_back(Elt: Args[2]); // Val1
8161 APIOrderedArgs.push_back(Elt: Args[4]); // Val2
8162 APIOrderedArgs.push_back(Elt: Args[5]); // Weak
8163 APIOrderedArgs.push_back(Elt: Args[1]); // Order
8164 APIOrderedArgs.push_back(Elt: Args[3]); // OrderFail
8165 break;
8166 }
8167 } else
8168 APIOrderedArgs.append(in_start: Args.begin(), in_end: Args.end());
8169
8170 // The first argument's non-CV pointer type is used to deduce the type of
8171 // subsequent arguments, except for:
8172 // - weak flag (always converted to bool)
8173 // - memory order (always converted to int)
8174 // - scope (always converted to int)
8175 for (unsigned i = 0; i != APIOrderedArgs.size(); ++i) {
8176 QualType Ty;
8177 if (i < NumVals[Form] + 1) {
8178 switch (i) {
8179 case 0:
8180 // The first argument is always a pointer. It has a fixed type.
8181      // It is always dereferenced, so a null pointer is undefined behavior.
8182 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i], CallSiteLoc: ExprRange.getBegin());
8183 // Nothing else to do: we already know all we want about this pointer.
8184 continue;
8185 case 1:
8186 // The second argument is the non-atomic operand. For arithmetic, this
8187 // is always passed by value, and for a compare_exchange it is always
8188 // passed by address. For the rest, GNU uses by-address and C11 uses
8189 // by-value.
8190 assert(Form != Load);
8191 if (Form == Arithmetic && ValType->isPointerType())
8192 Ty = Context.getPointerDiffType();
8193 else if (Form == Init || Form == Arithmetic)
8194 Ty = ValType;
8195 else if (Form == Copy || Form == Xchg) {
8196 if (IsPassedByAddress) {
8197          // The value pointer is always dereferenced, so a null pointer is
8198          // undefined behavior.
8198 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i],
8199 CallSiteLoc: ExprRange.getBegin());
8200 }
8201 Ty = ByValType;
8202 } else {
8203 Expr *ValArg = APIOrderedArgs[i];
8204        // The value pointer is always dereferenced, so a null pointer is
8205        // undefined behavior.
8205 CheckNonNullArgument(S&: *this, ArgExpr: ValArg, CallSiteLoc: ExprRange.getBegin());
8206 LangAS AS = LangAS::Default;
8207 // Keep address space of non-atomic pointer type.
8208 if (const PointerType *PtrTy =
8209 ValArg->getType()->getAs<PointerType>()) {
8210 AS = PtrTy->getPointeeType().getAddressSpace();
8211 }
8212 Ty = Context.getPointerType(
8213 T: Context.getAddrSpaceQualType(T: ValType.getUnqualifiedType(), AddressSpace: AS));
8214 }
8215 break;
8216 case 2:
8217 // The third argument to compare_exchange / GNU exchange is the desired
8218 // value, either by-value (for the C11 and *_n variant) or as a pointer.
8219 if (IsPassedByAddress)
8220 CheckNonNullArgument(S&: *this, ArgExpr: APIOrderedArgs[i], CallSiteLoc: ExprRange.getBegin());
8221 Ty = ByValType;
8222 break;
8223 case 3:
8224 // The fourth argument to GNU compare_exchange is a 'weak' flag.
8225 Ty = Context.BoolTy;
8226 break;
8227 }
8228 } else {
8229 // The order(s) and scope are always converted to int.
8230 Ty = Context.IntTy;
8231 }
8232
8233 InitializedEntity Entity =
8234 InitializedEntity::InitializeParameter(Context, Type: Ty, Consumed: false);
8235 ExprResult Arg = APIOrderedArgs[i];
8236 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
8237 if (Arg.isInvalid())
8238 return true;
8239 APIOrderedArgs[i] = Arg.get();
8240 }
8241
8242 // Permute the arguments into a 'consistent' order.
8243 SmallVector<Expr*, 5> SubExprs;
8244 SubExprs.push_back(Elt: Ptr);
8245 switch (Form) {
8246 case Init:
8247 // Note, AtomicExpr::getVal1() has a special case for this atomic.
8248 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8249 break;
8250 case Load:
8251 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Order
8252 break;
8253 case LoadCopy:
8254 case Copy:
8255 case Arithmetic:
8256 case Xchg:
8257 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Order
8258 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8259 break;
8260 case GNUXchg:
8261 // Note, AtomicExpr::getVal2() has a special case for this atomic.
8262 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Order
8263 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8264 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8265 break;
8266 case C11CmpXchg:
8267 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Order
8268 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8269 SubExprs.push_back(Elt: APIOrderedArgs[4]); // OrderFail
8270 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8271 break;
8272 case GNUCmpXchg:
8273 SubExprs.push_back(Elt: APIOrderedArgs[4]); // Order
8274 SubExprs.push_back(Elt: APIOrderedArgs[1]); // Val1
8275 SubExprs.push_back(Elt: APIOrderedArgs[5]); // OrderFail
8276 SubExprs.push_back(Elt: APIOrderedArgs[2]); // Val2
8277 SubExprs.push_back(Elt: APIOrderedArgs[3]); // Weak
8278 break;
8279 }
8280
8281 // If the memory orders are constants, check they are valid.
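  // For illustration, a constant failure ordering outside {relaxed, consume,
  // acquire, seq_cst}, e.g.
  //   __atomic_compare_exchange_n(&I, &Expected, 1, /*weak=*/false,
  //                               __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
  // is diagnosed with warn_atomic_op_has_invalid_memory_order below.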
8282 if (SubExprs.size() >= 2 && Form != Init) {
8283 std::optional<llvm::APSInt> Success =
8284 SubExprs[1]->getIntegerConstantExpr(Ctx: Context);
8285 if (Success && !isValidOrderingForOp(Ordering: Success->getSExtValue(), Op)) {
8286 Diag(SubExprs[1]->getBeginLoc(),
8287 diag::warn_atomic_op_has_invalid_memory_order)
8288 << /*success=*/(Form == C11CmpXchg || Form == GNUCmpXchg)
8289 << SubExprs[1]->getSourceRange();
8290 }
8291 if (SubExprs.size() >= 5) {
8292 if (std::optional<llvm::APSInt> Failure =
8293 SubExprs[3]->getIntegerConstantExpr(Ctx: Context)) {
8294 if (!llvm::is_contained(
8295 Set: {llvm::AtomicOrderingCABI::relaxed,
8296 llvm::AtomicOrderingCABI::consume,
8297 llvm::AtomicOrderingCABI::acquire,
8298 llvm::AtomicOrderingCABI::seq_cst},
8299 Element: (llvm::AtomicOrderingCABI)Failure->getSExtValue())) {
8300 Diag(SubExprs[3]->getBeginLoc(),
8301 diag::warn_atomic_op_has_invalid_memory_order)
8302 << /*failure=*/2 << SubExprs[3]->getSourceRange();
8303 }
8304 }
8305 }
8306 }
8307
8308 if (auto ScopeModel = AtomicExpr::getScopeModel(Op)) {
8309 auto *Scope = Args[Args.size() - 1];
8310 if (std::optional<llvm::APSInt> Result =
8311 Scope->getIntegerConstantExpr(Ctx: Context)) {
8312 if (!ScopeModel->isValid(Result->getZExtValue()))
8313 Diag(Scope->getBeginLoc(), diag::err_atomic_op_has_invalid_synch_scope)
8314 << Scope->getSourceRange();
8315 }
8316 SubExprs.push_back(Elt: Scope);
8317 }
8318
8319 AtomicExpr *AE = new (Context)
8320 AtomicExpr(ExprRange.getBegin(), SubExprs, ResultType, Op, RParenLoc);
8321
8322 if ((Op == AtomicExpr::AO__c11_atomic_load ||
8323 Op == AtomicExpr::AO__c11_atomic_store ||
8324 Op == AtomicExpr::AO__opencl_atomic_load ||
8325 Op == AtomicExpr::AO__hip_atomic_load ||
8326 Op == AtomicExpr::AO__opencl_atomic_store ||
8327 Op == AtomicExpr::AO__hip_atomic_store) &&
8328 Context.AtomicUsesUnsupportedLibcall(AE))
8329 Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
8330 << ((Op == AtomicExpr::AO__c11_atomic_load ||
8331 Op == AtomicExpr::AO__opencl_atomic_load ||
8332 Op == AtomicExpr::AO__hip_atomic_load)
8333 ? 0
8334 : 1);
8335
8336 if (ValType->isBitIntType()) {
8337 Diag(Ptr->getExprLoc(), diag::err_atomic_builtin_bit_int_prohibit);
8338 return ExprError();
8339 }
8340
8341 return AE;
8342}
8343
8344/// checkBuiltinArgument - Given a call to a builtin function, perform
8345/// normal type-checking on the given argument, updating the call in
8346/// place. This is useful when a builtin function requires custom
8347/// type-checking for some of its arguments but not necessarily all of
8348/// them.
8349///
8350/// Returns true on error.
8351static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
8352 FunctionDecl *Fn = E->getDirectCallee();
8353 assert(Fn && "builtin call without direct callee!");
8354
8355 ParmVarDecl *Param = Fn->getParamDecl(i: ArgIndex);
8356 InitializedEntity Entity =
8357 InitializedEntity::InitializeParameter(Context&: S.Context, Parm: Param);
8358
8359 ExprResult Arg = E->getArg(Arg: ArgIndex);
8360 Arg = S.PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
8361 if (Arg.isInvalid())
8362 return true;
8363
8364 E->setArg(Arg: ArgIndex, ArgExpr: Arg.get());
8365 return false;
8366}
8367
8368bool Sema::BuiltinWasmRefNullExtern(CallExpr *TheCall) {
8369 if (TheCall->getNumArgs() != 0)
8370 return true;
8371
8372 TheCall->setType(Context.getWebAssemblyExternrefType());
8373
8374 return false;
8375}
8376
8377bool Sema::BuiltinWasmRefNullFunc(CallExpr *TheCall) {
8378 if (TheCall->getNumArgs() != 0) {
8379 Diag(TheCall->getBeginLoc(), diag::err_typecheck_call_too_many_args)
8380 << 0 /*function call*/ << /*expected*/ 0 << TheCall->getNumArgs()
8381 << /*is non object*/ 0;
8382 return true;
8383 }
8384
8385  // This custom type-checking code ensures that the nodes are as expected, so
8386  // that the necessary builtin can be generated later on.
8387 QualType Pointee = Context.getFunctionType(ResultTy: Context.VoidTy, Args: {}, EPI: {});
8388 QualType Type = Context.getPointerType(T: Pointee);
8389 Pointee = Context.getAddrSpaceQualType(T: Pointee, AddressSpace: LangAS::wasm_funcref);
8390 Type = Context.getAttributedType(attr::WebAssemblyFuncref, Type,
8391 Context.getPointerType(Pointee));
8392 TheCall->setType(Type);
8393
8394 return false;
8395}
8396
8397/// We have a call to a function like __sync_fetch_and_add, which is an
8398/// overloaded function based on the pointer type of its first argument.
8399/// The main BuildCallExpr routines have already promoted the types of
8400/// arguments because all of these calls are prototyped as void(...).
8401///
8402/// This function goes through and does final semantic checking for these
8403/// builtins, as well as generating any warnings.
8404ExprResult
8405Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
8406 CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
8407 Expr *Callee = TheCall->getCallee();
8408 DeclRefExpr *DRE = cast<DeclRefExpr>(Val: Callee->IgnoreParenCasts());
8409 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
8410
8411 // Ensure that we have at least one argument to do type inference from.
8412 if (TheCall->getNumArgs() < 1) {
8413 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
8414 << 0 << 1 << TheCall->getNumArgs() << /*is non object*/ 0
8415 << Callee->getSourceRange();
8416 return ExprError();
8417 }
8418
8419 // Inspect the first argument of the atomic builtin. This should always be
8420 // a pointer type, whose element is an integral scalar or pointer type.
8421 // Because it is a pointer type, we don't have to worry about any implicit
8422 // casts here.
8423 // FIXME: We don't allow floating point scalars as input.
8424 Expr *FirstArg = TheCall->getArg(Arg: 0);
8425 ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(E: FirstArg);
8426 if (FirstArgResult.isInvalid())
8427 return ExprError();
8428 FirstArg = FirstArgResult.get();
8429 TheCall->setArg(Arg: 0, ArgExpr: FirstArg);
8430
8431 const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
8432 if (!pointerType) {
8433 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer)
8434 << FirstArg->getType() << FirstArg->getSourceRange();
8435 return ExprError();
8436 }
8437
8438 QualType ValType = pointerType->getPointeeType();
8439 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
8440 !ValType->isBlockPointerType()) {
8441 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_must_be_pointer_intptr)
8442 << FirstArg->getType() << FirstArg->getSourceRange();
8443 return ExprError();
8444 }
8445
8446 if (ValType.isConstQualified()) {
8447 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_cannot_be_const)
8448 << FirstArg->getType() << FirstArg->getSourceRange();
8449 return ExprError();
8450 }
8451
8452 switch (ValType.getObjCLifetime()) {
8453 case Qualifiers::OCL_None:
8454 case Qualifiers::OCL_ExplicitNone:
8455 // okay
8456 break;
8457
8458 case Qualifiers::OCL_Weak:
8459 case Qualifiers::OCL_Strong:
8460 case Qualifiers::OCL_Autoreleasing:
8461 Diag(DRE->getBeginLoc(), diag::err_arc_atomic_ownership)
8462 << ValType << FirstArg->getSourceRange();
8463 return ExprError();
8464 }
8465
8466 // Strip any qualifiers off ValType.
8467 ValType = ValType.getUnqualifiedType();
8468
8469 // The majority of builtins return a value, but a few have special return
8470 // types, so allow them to override appropriately below.
8471 QualType ResultType = ValType;
8472
8473 // We need to figure out which concrete builtin this maps onto. For example,
8474 // __sync_fetch_and_add with a 2 byte object turns into
8475 // __sync_fetch_and_add_2.
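  // For illustration:
  //   short Counter;
  //   __sync_fetch_and_add(&Counter, 1); // checked as __sync_fetch_and_add_2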
8476#define BUILTIN_ROW(x) \
8477 { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
8478 Builtin::BI##x##_8, Builtin::BI##x##_16 }
8479
8480 static const unsigned BuiltinIndices[][5] = {
8481 BUILTIN_ROW(__sync_fetch_and_add),
8482 BUILTIN_ROW(__sync_fetch_and_sub),
8483 BUILTIN_ROW(__sync_fetch_and_or),
8484 BUILTIN_ROW(__sync_fetch_and_and),
8485 BUILTIN_ROW(__sync_fetch_and_xor),
8486 BUILTIN_ROW(__sync_fetch_and_nand),
8487
8488 BUILTIN_ROW(__sync_add_and_fetch),
8489 BUILTIN_ROW(__sync_sub_and_fetch),
8490 BUILTIN_ROW(__sync_and_and_fetch),
8491 BUILTIN_ROW(__sync_or_and_fetch),
8492 BUILTIN_ROW(__sync_xor_and_fetch),
8493 BUILTIN_ROW(__sync_nand_and_fetch),
8494
8495 BUILTIN_ROW(__sync_val_compare_and_swap),
8496 BUILTIN_ROW(__sync_bool_compare_and_swap),
8497 BUILTIN_ROW(__sync_lock_test_and_set),
8498 BUILTIN_ROW(__sync_lock_release),
8499 BUILTIN_ROW(__sync_swap)
8500 };
8501#undef BUILTIN_ROW
8502
8503 // Determine the index of the size.
8504 unsigned SizeIndex;
8505 switch (Context.getTypeSizeInChars(T: ValType).getQuantity()) {
8506 case 1: SizeIndex = 0; break;
8507 case 2: SizeIndex = 1; break;
8508 case 4: SizeIndex = 2; break;
8509 case 8: SizeIndex = 3; break;
8510 case 16: SizeIndex = 4; break;
8511 default:
8512 Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size)
8513 << FirstArg->getType() << FirstArg->getSourceRange();
8514 return ExprError();
8515 }
8516
8517 // Each of these builtins has one pointer argument, followed by some number of
8518  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
8519 // that we ignore. Find out which row of BuiltinIndices to read from as well
8520 // as the number of fixed args.
8521 unsigned BuiltinID = FDecl->getBuiltinID();
8522 unsigned BuiltinIndex, NumFixed = 1;
8523 bool WarnAboutSemanticsChange = false;
8524 switch (BuiltinID) {
8525 default: llvm_unreachable("Unknown overloaded atomic builtin!");
8526 case Builtin::BI__sync_fetch_and_add:
8527 case Builtin::BI__sync_fetch_and_add_1:
8528 case Builtin::BI__sync_fetch_and_add_2:
8529 case Builtin::BI__sync_fetch_and_add_4:
8530 case Builtin::BI__sync_fetch_and_add_8:
8531 case Builtin::BI__sync_fetch_and_add_16:
8532 BuiltinIndex = 0;
8533 break;
8534
8535 case Builtin::BI__sync_fetch_and_sub:
8536 case Builtin::BI__sync_fetch_and_sub_1:
8537 case Builtin::BI__sync_fetch_and_sub_2:
8538 case Builtin::BI__sync_fetch_and_sub_4:
8539 case Builtin::BI__sync_fetch_and_sub_8:
8540 case Builtin::BI__sync_fetch_and_sub_16:
8541 BuiltinIndex = 1;
8542 break;
8543
8544 case Builtin::BI__sync_fetch_and_or:
8545 case Builtin::BI__sync_fetch_and_or_1:
8546 case Builtin::BI__sync_fetch_and_or_2:
8547 case Builtin::BI__sync_fetch_and_or_4:
8548 case Builtin::BI__sync_fetch_and_or_8:
8549 case Builtin::BI__sync_fetch_and_or_16:
8550 BuiltinIndex = 2;
8551 break;
8552
8553 case Builtin::BI__sync_fetch_and_and:
8554 case Builtin::BI__sync_fetch_and_and_1:
8555 case Builtin::BI__sync_fetch_and_and_2:
8556 case Builtin::BI__sync_fetch_and_and_4:
8557 case Builtin::BI__sync_fetch_and_and_8:
8558 case Builtin::BI__sync_fetch_and_and_16:
8559 BuiltinIndex = 3;
8560 break;
8561
8562 case Builtin::BI__sync_fetch_and_xor:
8563 case Builtin::BI__sync_fetch_and_xor_1:
8564 case Builtin::BI__sync_fetch_and_xor_2:
8565 case Builtin::BI__sync_fetch_and_xor_4:
8566 case Builtin::BI__sync_fetch_and_xor_8:
8567 case Builtin::BI__sync_fetch_and_xor_16:
8568 BuiltinIndex = 4;
8569 break;
8570
8571 case Builtin::BI__sync_fetch_and_nand:
8572 case Builtin::BI__sync_fetch_and_nand_1:
8573 case Builtin::BI__sync_fetch_and_nand_2:
8574 case Builtin::BI__sync_fetch_and_nand_4:
8575 case Builtin::BI__sync_fetch_and_nand_8:
8576 case Builtin::BI__sync_fetch_and_nand_16:
8577 BuiltinIndex = 5;
8578 WarnAboutSemanticsChange = true;
8579 break;
8580
8581 case Builtin::BI__sync_add_and_fetch:
8582 case Builtin::BI__sync_add_and_fetch_1:
8583 case Builtin::BI__sync_add_and_fetch_2:
8584 case Builtin::BI__sync_add_and_fetch_4:
8585 case Builtin::BI__sync_add_and_fetch_8:
8586 case Builtin::BI__sync_add_and_fetch_16:
8587 BuiltinIndex = 6;
8588 break;
8589
8590 case Builtin::BI__sync_sub_and_fetch:
8591 case Builtin::BI__sync_sub_and_fetch_1:
8592 case Builtin::BI__sync_sub_and_fetch_2:
8593 case Builtin::BI__sync_sub_and_fetch_4:
8594 case Builtin::BI__sync_sub_and_fetch_8:
8595 case Builtin::BI__sync_sub_and_fetch_16:
8596 BuiltinIndex = 7;
8597 break;
8598
8599 case Builtin::BI__sync_and_and_fetch:
8600 case Builtin::BI__sync_and_and_fetch_1:
8601 case Builtin::BI__sync_and_and_fetch_2:
8602 case Builtin::BI__sync_and_and_fetch_4:
8603 case Builtin::BI__sync_and_and_fetch_8:
8604 case Builtin::BI__sync_and_and_fetch_16:
8605 BuiltinIndex = 8;
8606 break;
8607
8608 case Builtin::BI__sync_or_and_fetch:
8609 case Builtin::BI__sync_or_and_fetch_1:
8610 case Builtin::BI__sync_or_and_fetch_2:
8611 case Builtin::BI__sync_or_and_fetch_4:
8612 case Builtin::BI__sync_or_and_fetch_8:
8613 case Builtin::BI__sync_or_and_fetch_16:
8614 BuiltinIndex = 9;
8615 break;
8616
8617 case Builtin::BI__sync_xor_and_fetch:
8618 case Builtin::BI__sync_xor_and_fetch_1:
8619 case Builtin::BI__sync_xor_and_fetch_2:
8620 case Builtin::BI__sync_xor_and_fetch_4:
8621 case Builtin::BI__sync_xor_and_fetch_8:
8622 case Builtin::BI__sync_xor_and_fetch_16:
8623 BuiltinIndex = 10;
8624 break;
8625
8626 case Builtin::BI__sync_nand_and_fetch:
8627 case Builtin::BI__sync_nand_and_fetch_1:
8628 case Builtin::BI__sync_nand_and_fetch_2:
8629 case Builtin::BI__sync_nand_and_fetch_4:
8630 case Builtin::BI__sync_nand_and_fetch_8:
8631 case Builtin::BI__sync_nand_and_fetch_16:
8632 BuiltinIndex = 11;
8633 WarnAboutSemanticsChange = true;
8634 break;
8635
8636 case Builtin::BI__sync_val_compare_and_swap:
8637 case Builtin::BI__sync_val_compare_and_swap_1:
8638 case Builtin::BI__sync_val_compare_and_swap_2:
8639 case Builtin::BI__sync_val_compare_and_swap_4:
8640 case Builtin::BI__sync_val_compare_and_swap_8:
8641 case Builtin::BI__sync_val_compare_and_swap_16:
8642 BuiltinIndex = 12;
8643 NumFixed = 2;
8644 break;
8645
8646 case Builtin::BI__sync_bool_compare_and_swap:
8647 case Builtin::BI__sync_bool_compare_and_swap_1:
8648 case Builtin::BI__sync_bool_compare_and_swap_2:
8649 case Builtin::BI__sync_bool_compare_and_swap_4:
8650 case Builtin::BI__sync_bool_compare_and_swap_8:
8651 case Builtin::BI__sync_bool_compare_and_swap_16:
8652 BuiltinIndex = 13;
8653 NumFixed = 2;
8654 ResultType = Context.BoolTy;
8655 break;
8656
8657 case Builtin::BI__sync_lock_test_and_set:
8658 case Builtin::BI__sync_lock_test_and_set_1:
8659 case Builtin::BI__sync_lock_test_and_set_2:
8660 case Builtin::BI__sync_lock_test_and_set_4:
8661 case Builtin::BI__sync_lock_test_and_set_8:
8662 case Builtin::BI__sync_lock_test_and_set_16:
8663 BuiltinIndex = 14;
8664 break;
8665
8666 case Builtin::BI__sync_lock_release:
8667 case Builtin::BI__sync_lock_release_1:
8668 case Builtin::BI__sync_lock_release_2:
8669 case Builtin::BI__sync_lock_release_4:
8670 case Builtin::BI__sync_lock_release_8:
8671 case Builtin::BI__sync_lock_release_16:
8672 BuiltinIndex = 15;
8673 NumFixed = 0;
8674 ResultType = Context.VoidTy;
8675 break;
8676
8677 case Builtin::BI__sync_swap:
8678 case Builtin::BI__sync_swap_1:
8679 case Builtin::BI__sync_swap_2:
8680 case Builtin::BI__sync_swap_4:
8681 case Builtin::BI__sync_swap_8:
8682 case Builtin::BI__sync_swap_16:
8683 BuiltinIndex = 16;
8684 break;
8685 }
8686
8687 // Now that we know how many fixed arguments we expect, first check that we
8688 // have at least that many.
8689 if (TheCall->getNumArgs() < 1+NumFixed) {
8690 Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least)
8691 << 0 << 1 + NumFixed << TheCall->getNumArgs() << /*is non object*/ 0
8692 << Callee->getSourceRange();
8693 return ExprError();
8694 }
8695
8696 Diag(TheCall->getEndLoc(), diag::warn_atomic_implicit_seq_cst)
8697 << Callee->getSourceRange();
8698
8699 if (WarnAboutSemanticsChange) {
8700 Diag(TheCall->getEndLoc(), diag::warn_sync_fetch_and_nand_semantics_change)
8701 << Callee->getSourceRange();
8702 }
8703
8704  // Get the decl for the concrete builtin; from it we can tell which
8705  // concrete integer type we should convert to.
8706 unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
8707 StringRef NewBuiltinName = Context.BuiltinInfo.getName(ID: NewBuiltinID);
8708 FunctionDecl *NewBuiltinDecl;
8709 if (NewBuiltinID == BuiltinID)
8710 NewBuiltinDecl = FDecl;
8711 else {
8712 // Perform builtin lookup to avoid redeclaring it.
8713 DeclarationName DN(&Context.Idents.get(Name: NewBuiltinName));
8714 LookupResult Res(*this, DN, DRE->getBeginLoc(), LookupOrdinaryName);
8715 LookupName(R&: Res, S: TUScope, /*AllowBuiltinCreation=*/true);
8716 assert(Res.getFoundDecl());
8717 NewBuiltinDecl = dyn_cast<FunctionDecl>(Val: Res.getFoundDecl());
8718 if (!NewBuiltinDecl)
8719 return ExprError();
8720 }
8721
8722 // The first argument --- the pointer --- has a fixed type; we
8723 // deduce the types of the rest of the arguments accordingly. Walk
8724 // the remaining arguments, converting them to the deduced value type.
8725 for (unsigned i = 0; i != NumFixed; ++i) {
8726 ExprResult Arg = TheCall->getArg(Arg: i+1);
8727
8728 // GCC does an implicit conversion to the pointer or integer ValType. This
8729    // can fail in some cases (1i -> int**), so check for that error case now.
8730 // Initialize the argument.
8731 InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
8732 Type: ValType, /*consume*/ Consumed: false);
8733 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
8734 if (Arg.isInvalid())
8735 return ExprError();
8736
8737 // Okay, we have something that *can* be converted to the right type. Check
8738 // to see if there is a potentially weird extension going on here. This can
8739    // happen when you do an atomic operation on something like a char* and
8740 // pass in 42. The 42 gets converted to char. This is even more strange
8741 // for things like 45.123 -> char, etc.
8742 // FIXME: Do this check.
8743 TheCall->setArg(Arg: i+1, ArgExpr: Arg.get());
8744 }
8745
8746 // Create a new DeclRefExpr to refer to the new decl.
8747 DeclRefExpr *NewDRE = DeclRefExpr::Create(
8748 Context, DRE->getQualifierLoc(), SourceLocation(), NewBuiltinDecl,
8749 /*enclosing*/ false, DRE->getLocation(), Context.BuiltinFnTy,
8750 DRE->getValueKind(), nullptr, nullptr, DRE->isNonOdrUse());
8751
8752 // Set the callee in the CallExpr.
8753 // FIXME: This loses syntactic information.
8754 QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
8755 ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
8756 CK_BuiltinFnToFnPtr);
8757 TheCall->setCallee(PromotedCall.get());
8758
8759 // Change the result type of the call to match the original value type. This
8760  // is arbitrary, but the codegen for these builtins is designed to handle it
8761 // gracefully.
8762 TheCall->setType(ResultType);
8763
8764 // Prohibit problematic uses of bit-precise integer types with atomic
8765 // builtins. The arguments would have already been converted to the first
8766  // argument's type, so we only need to check the first argument.
8767 const auto *BitIntValType = ValType->getAs<BitIntType>();
8768 if (BitIntValType && !llvm::isPowerOf2_64(Value: BitIntValType->getNumBits())) {
8769 Diag(FirstArg->getExprLoc(), diag::err_atomic_builtin_ext_int_size);
8770 return ExprError();
8771 }
8772
8773 return TheCallResult;
8774}
8775
8776/// SemaBuiltinNontemporalOverloaded - We have a call to
8777/// __builtin_nontemporal_store or __builtin_nontemporal_load, which is an
8778/// overloaded function based on the pointer type of its last argument.
8779///
8780/// This function goes through and does final semantic checking for these
8781/// builtins.
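/// For illustration, '__builtin_nontemporal_store(V, Ptr)' takes the value
/// first and the pointer last, while 'T X = __builtin_nontemporal_load(Ptr)'
/// takes only the pointer; in both cases the access type is deduced from the
/// pointee type of the last argument.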
8782ExprResult Sema::SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult) {
8783  CallExpr *TheCall = static_cast<CallExpr *>(TheCallResult.get());
8784 DeclRefExpr *DRE =
8785 cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
8786 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
8787 unsigned BuiltinID = FDecl->getBuiltinID();
8788 assert((BuiltinID == Builtin::BI__builtin_nontemporal_store ||
8789 BuiltinID == Builtin::BI__builtin_nontemporal_load) &&
8790 "Unexpected nontemporal load/store builtin!");
8791 bool isStore = BuiltinID == Builtin::BI__builtin_nontemporal_store;
8792 unsigned numArgs = isStore ? 2 : 1;
8793
8794 // Ensure that we have the proper number of arguments.
8795 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: numArgs))
8796 return ExprError();
8797
8798 // Inspect the last argument of the nontemporal builtin. This should always
8799  // be a pointer type, from which we infer the type of the memory access.
8800 // Because it is a pointer type, we don't have to worry about any implicit
8801 // casts here.
8802 Expr *PointerArg = TheCall->getArg(Arg: numArgs - 1);
8803 ExprResult PointerArgResult =
8804 DefaultFunctionArrayLvalueConversion(E: PointerArg);
8805
8806 if (PointerArgResult.isInvalid())
8807 return ExprError();
8808 PointerArg = PointerArgResult.get();
8809 TheCall->setArg(Arg: numArgs - 1, ArgExpr: PointerArg);
8810
8811 const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>();
8812 if (!pointerType) {
8813 Diag(DRE->getBeginLoc(), diag::err_nontemporal_builtin_must_be_pointer)
8814 << PointerArg->getType() << PointerArg->getSourceRange();
8815 return ExprError();
8816 }
8817
8818 QualType ValType = pointerType->getPointeeType();
8819
8820 // Strip any qualifiers off ValType.
8821 ValType = ValType.getUnqualifiedType();
8822 if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
8823 !ValType->isBlockPointerType() && !ValType->isFloatingType() &&
8824 !ValType->isVectorType()) {
8825 Diag(DRE->getBeginLoc(),
8826 diag::err_nontemporal_builtin_must_be_pointer_intfltptr_or_vector)
8827 << PointerArg->getType() << PointerArg->getSourceRange();
8828 return ExprError();
8829 }
8830
8831 if (!isStore) {
8832 TheCall->setType(ValType);
8833 return TheCallResult;
8834 }
8835
8836 ExprResult ValArg = TheCall->getArg(Arg: 0);
8837 InitializedEntity Entity = InitializedEntity::InitializeParameter(
8838 Context, Type: ValType, /*consume*/ Consumed: false);
8839 ValArg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: ValArg);
8840 if (ValArg.isInvalid())
8841 return ExprError();
8842
8843 TheCall->setArg(Arg: 0, ArgExpr: ValArg.get());
8844 TheCall->setType(Context.VoidTy);
8845 return TheCallResult;
8846}
8847
8848/// CheckObjCString - Checks that the argument to the builtin
8849/// CFString constructor is correct.
8850/// Note: It might also make sense to do the UTF-16 conversion here (would
8851/// simplify the backend).
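/// For illustration, this is used for calls such as
/// __builtin___CFStringMakeConstantString("literal"), whose argument must be
/// an ordinary (narrow) string literal.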
8852bool Sema::CheckObjCString(Expr *Arg) {
8853 Arg = Arg->IgnoreParenCasts();
8854 StringLiteral *Literal = dyn_cast<StringLiteral>(Val: Arg);
8855
8856 if (!Literal || !Literal->isOrdinary()) {
8857 Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
8858 << Arg->getSourceRange();
8859 return true;
8860 }
8861
8862 if (Literal->containsNonAsciiOrNull()) {
8863 StringRef String = Literal->getString();
8864 unsigned NumBytes = String.size();
8865 SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
8866 const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
8867 llvm::UTF16 *ToPtr = &ToBuf[0];
8868
8869 llvm::ConversionResult Result =
8870 llvm::ConvertUTF8toUTF16(sourceStart: &FromPtr, sourceEnd: FromPtr + NumBytes, targetStart: &ToPtr,
8871 targetEnd: ToPtr + NumBytes, flags: llvm::strictConversion);
8872 // Check for conversion failure.
8873 if (Result != llvm::conversionOK)
8874 Diag(Arg->getBeginLoc(), diag::warn_cfstring_truncated)
8875 << Arg->getSourceRange();
8876 }
8877 return false;
8878}
8879
8880/// CheckOSLogFormatStringArg - Checks that the format string argument to the
8881/// os_log() and os_trace() functions is correct, and converts it to const char *.
8882ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
8883 Arg = Arg->IgnoreParenCasts();
8884 auto *Literal = dyn_cast<StringLiteral>(Val: Arg);
8885 if (!Literal) {
8886 if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Val: Arg)) {
8887 Literal = ObjcLiteral->getString();
8888 }
8889 }
8890
8891 if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
8892 return ExprError(
8893 Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
8894 << Arg->getSourceRange());
8895 }
8896
8897 ExprResult Result(Literal);
8898 QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
8899 InitializedEntity Entity =
8900 InitializedEntity::InitializeParameter(Context, Type: ResultTy, Consumed: false);
8901 Result = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Result);
8902 return Result;
8903}
8904
8905/// Check that the user is calling the appropriate va_start builtin for the
8906/// target and calling convention.
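/// For illustration, __builtin_ms_va_start is rejected inside a System V ABI
/// function, and __builtin_va_start is rejected inside a Win64 ABI function on
/// x86-64/AArch64 Unix (and inside a System V ABI function on x64 Windows).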
8907static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
8908 const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
8909 bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
8910 bool IsAArch64 = (TT.getArch() == llvm::Triple::aarch64 ||
8911 TT.getArch() == llvm::Triple::aarch64_32);
8912 bool IsWindows = TT.isOSWindows();
8913 bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
8914 if (IsX64 || IsAArch64) {
8915 CallingConv CC = CC_C;
8916 if (const FunctionDecl *FD = S.getCurFunctionDecl())
8917 CC = FD->getType()->castAs<FunctionType>()->getCallConv();
8918 if (IsMSVAStart) {
8919 // Don't allow this in System V ABI functions.
8920 if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
8921 return S.Diag(Fn->getBeginLoc(),
8922 diag::err_ms_va_start_used_in_sysv_function);
8923 } else {
8924 // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
8925 // On x64 Windows, don't allow this in System V ABI functions.
8926 // (Yes, that means there's no corresponding way to support variadic
8927 // System V ABI functions on Windows.)
8928 if ((IsWindows && CC == CC_X86_64SysV) ||
8929 (!IsWindows && CC == CC_Win64))
8930 return S.Diag(Fn->getBeginLoc(),
8931 diag::err_va_start_used_in_wrong_abi_function)
8932 << !IsWindows;
8933 }
8934 return false;
8935 }
8936
8937 if (IsMSVAStart)
8938 return S.Diag(Fn->getBeginLoc(), diag::err_builtin_x64_aarch64_only);
8939 return false;
8940}
8941
8942static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
8943 ParmVarDecl **LastParam = nullptr) {
8944 // Determine whether the current function, block, or obj-c method is variadic
8945 // and get its parameter list.
8946 bool IsVariadic = false;
8947 ArrayRef<ParmVarDecl *> Params;
8948 DeclContext *Caller = S.CurContext;
8949 if (auto *Block = dyn_cast<BlockDecl>(Val: Caller)) {
8950 IsVariadic = Block->isVariadic();
8951 Params = Block->parameters();
8952 } else if (auto *FD = dyn_cast<FunctionDecl>(Val: Caller)) {
8953 IsVariadic = FD->isVariadic();
8954 Params = FD->parameters();
8955 } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Val: Caller)) {
8956 IsVariadic = MD->isVariadic();
8957 // FIXME: This isn't correct for methods (results in bogus warning).
8958 Params = MD->parameters();
8959 } else if (isa<CapturedDecl>(Val: Caller)) {
8960 // We don't support va_start in a CapturedDecl.
8961 S.Diag(Fn->getBeginLoc(), diag::err_va_start_captured_stmt);
8962 return true;
8963 } else {
8964 // This must be some other declcontext that parses exprs.
8965 S.Diag(Fn->getBeginLoc(), diag::err_va_start_outside_function);
8966 return true;
8967 }
8968
8969 if (!IsVariadic) {
8970 S.Diag(Fn->getBeginLoc(), diag::err_va_start_fixed_function);
8971 return true;
8972 }
8973
8974 if (LastParam)
8975 *LastParam = Params.empty() ? nullptr : Params.back();
8976
8977 return false;
8978}
8979
8980/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
8981/// for validity. Emit an error and return true on failure; return false
8982/// on success.
8983bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
8984 Expr *Fn = TheCall->getCallee();
8985
8986 if (checkVAStartABI(S&: *this, BuiltinID, Fn))
8987 return true;
8988
8989 // In C23 mode, va_start only needs one argument. However, the builtin still
8990 // requires two arguments (which matches the behavior of the GCC builtin),
8991  // so <stdarg.h> passes `0` as the second argument in C23 mode.
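  // For illustration, a C23 call written as 'va_start(ap)' reaches this check
  // as '__builtin_va_start(ap, 0)'.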
8992 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
8993 return true;
8994
8995 // Type-check the first argument normally.
8996 if (checkBuiltinArgument(S&: *this, E: TheCall, ArgIndex: 0))
8997 return true;
8998
8999 // Check that the current function is variadic, and get its last parameter.
9000 ParmVarDecl *LastParam;
9001 if (checkVAStartIsInVariadicFunction(S&: *this, Fn, LastParam: &LastParam))
9002 return true;
9003
9004 // Verify that the second argument to the builtin is the last argument of the
9005 // current function or method. In C23 mode, if the second argument is an
9006 // integer constant expression with value 0, then we don't bother with this
9007 // check.
9008 bool SecondArgIsLastNamedArgument = false;
9009 const Expr *Arg = TheCall->getArg(Arg: 1)->IgnoreParenCasts();
9010 if (std::optional<llvm::APSInt> Val =
9011 TheCall->getArg(Arg: 1)->getIntegerConstantExpr(Ctx: Context);
9012 Val && LangOpts.C23 && *Val == 0)
9013 return false;
9014
9015 // These are valid if SecondArgIsLastNamedArgument is false after the next
9016 // block.
9017 QualType Type;
9018 SourceLocation ParamLoc;
9019 bool IsCRegister = false;
9020
9021 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Val: Arg)) {
9022 if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(Val: DR->getDecl())) {
9023 SecondArgIsLastNamedArgument = PV == LastParam;
9024
9025 Type = PV->getType();
9026 ParamLoc = PV->getLocation();
9027 IsCRegister =
9028 PV->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus;
9029 }
9030 }
9031
9032 if (!SecondArgIsLastNamedArgument)
9033 Diag(TheCall->getArg(1)->getBeginLoc(),
9034 diag::warn_second_arg_of_va_start_not_last_named_param);
9035 else if (IsCRegister || Type->isReferenceType() ||
9036 Type->isSpecificBuiltinType(K: BuiltinType::Float) || [=] {
9037 // Promotable integers are UB, but enumerations need a bit of
9038 // extra checking to see what their promotable type actually is.
9039 if (!Context.isPromotableIntegerType(T: Type))
9040 return false;
9041 if (!Type->isEnumeralType())
9042 return true;
9043 const EnumDecl *ED = Type->castAs<EnumType>()->getDecl();
9044 return !(ED &&
9045 Context.typesAreCompatible(T1: ED->getPromotionType(), T2: Type));
9046 }()) {
9047 unsigned Reason = 0;
9048 if (Type->isReferenceType()) Reason = 1;
9049 else if (IsCRegister) Reason = 2;
9050 Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason;
9051 Diag(ParamLoc, diag::note_parameter_type) << Type;
9052 }
9053
9054 return false;
9055}
9056
9057bool Sema::SemaBuiltinVAStartARMMicrosoft(CallExpr *Call) {
9058 auto IsSuitablyTypedFormatArgument = [this](const Expr *Arg) -> bool {
9059 const LangOptions &LO = getLangOpts();
9060
9061 if (LO.CPlusPlus)
9062 return Arg->getType()
9063 .getCanonicalType()
9064 .getTypePtr()
9065 ->getPointeeType()
9066 .withoutLocalFastQualifiers() == Context.CharTy;
9067
9068    // In C, allow aliasing through `char *`; this is required at least for
9069    // AArch64.
9070 return true;
9071 };
9072
9073 // void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
9074 // const char *named_addr);
9075
9076 Expr *Func = Call->getCallee();
9077
9078 if (Call->getNumArgs() < 3)
9079 return Diag(Call->getEndLoc(),
9080 diag::err_typecheck_call_too_few_args_at_least)
9081 << 0 /*function call*/ << 3 << Call->getNumArgs()
9082 << /*is non object*/ 0;
9083
9084 // Type-check the first argument normally.
9085 if (checkBuiltinArgument(S&: *this, E: Call, ArgIndex: 0))
9086 return true;
9087
9088 // Check that the current function is variadic.
9089 if (checkVAStartIsInVariadicFunction(S&: *this, Fn: Func))
9090 return true;
9091
9092 // __va_start on Windows does not validate the parameter qualifiers
9093
9094 const Expr *Arg1 = Call->getArg(Arg: 1)->IgnoreParens();
9095 const Type *Arg1Ty = Arg1->getType().getCanonicalType().getTypePtr();
9096
9097 const Expr *Arg2 = Call->getArg(Arg: 2)->IgnoreParens();
9098 const Type *Arg2Ty = Arg2->getType().getCanonicalType().getTypePtr();
9099
9100 const QualType &ConstCharPtrTy =
9101 Context.getPointerType(Context.CharTy.withConst());
9102 if (!Arg1Ty->isPointerType() || !IsSuitablyTypedFormatArgument(Arg1))
9103 Diag(Arg1->getBeginLoc(), diag::err_typecheck_convert_incompatible)
9104 << Arg1->getType() << ConstCharPtrTy << 1 /* different class */
9105 << 0 /* qualifier difference */
9106 << 3 /* parameter mismatch */
9107 << 2 << Arg1->getType() << ConstCharPtrTy;
9108
9109 const QualType SizeTy = Context.getSizeType();
9110 if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy)
9111 Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible)
9112 << Arg2->getType() << SizeTy << 1 /* different class */
9113 << 0 /* qualifier difference */
9114 << 3 /* parameter mismatch */
9115 << 3 << Arg2->getType() << SizeTy;
9116
9117 return false;
9118}
9119
9120/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
9121/// friends. This is declared to take (...), so we have to check everything.
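/// For illustration, __builtin_isgreater(1, 2.0) promotes both operands to
/// double, while __builtin_isgreater(1, 2) is rejected because the common type
/// after the usual arithmetic conversions is not a real floating type.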
9122bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall, unsigned BuiltinID) {
9123 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9124 return true;
9125
9126 if (BuiltinID == Builtin::BI__builtin_isunordered &&
9127 TheCall->getFPFeaturesInEffect(getLangOpts()).getNoHonorNaNs())
9128 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9129 << 1 << 0 << TheCall->getSourceRange();
9130
9131 ExprResult OrigArg0 = TheCall->getArg(Arg: 0);
9132 ExprResult OrigArg1 = TheCall->getArg(Arg: 1);
9133
9134 // Do standard promotions between the two arguments, returning their common
9135 // type.
9136 QualType Res = UsualArithmeticConversions(
9137 LHS&: OrigArg0, RHS&: OrigArg1, Loc: TheCall->getExprLoc(), ACK: ACK_Comparison);
9138 if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
9139 return true;
9140
9141 // Make sure any conversions are pushed back into the call; this is
9142 // type safe since unordered compare builtins are declared as "_Bool
9143 // foo(...)".
9144 TheCall->setArg(Arg: 0, ArgExpr: OrigArg0.get());
9145 TheCall->setArg(Arg: 1, ArgExpr: OrigArg1.get());
9146
9147 if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
9148 return false;
9149
9150 // If the common type isn't a real floating type, then the arguments were
9151 // invalid for this operation.
9152 if (Res.isNull() || !Res->isRealFloatingType())
9153 return Diag(OrigArg0.get()->getBeginLoc(),
9154 diag::err_typecheck_call_invalid_ordered_compare)
9155 << OrigArg0.get()->getType() << OrigArg1.get()->getType()
9156 << SourceRange(OrigArg0.get()->getBeginLoc(),
9157 OrigArg1.get()->getEndLoc());
9158
9159 return false;
9160}
9161
9162/// SemaBuiltinFPClassification - Handle functions like
9163/// __builtin_isnan and friends. This is declared to take (...), so we have
9164/// to check everything.
9165bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs,
9166 unsigned BuiltinID) {
9167 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: NumArgs))
9168 return true;
9169
9170 FPOptions FPO = TheCall->getFPFeaturesInEffect(LO: getLangOpts());
9171 if (FPO.getNoHonorInfs() && (BuiltinID == Builtin::BI__builtin_isfinite ||
9172 BuiltinID == Builtin::BI__builtin_isinf ||
9173 BuiltinID == Builtin::BI__builtin_isinf_sign))
9174 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9175 << 0 << 0 << TheCall->getSourceRange();
9176
9177 if (FPO.getNoHonorNaNs() && (BuiltinID == Builtin::BI__builtin_isnan ||
9178 BuiltinID == Builtin::BI__builtin_isunordered))
9179 Diag(TheCall->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
9180 << 1 << 0 << TheCall->getSourceRange();
9181
9182 bool IsFPClass = NumArgs == 2;
9183
9184  // Find out the position of the floating-point argument.
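  // For illustration, __builtin_isfpclass(X, Mask) takes the floating-point
  // operand first, while __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
  // FP_SUBNORMAL, FP_ZERO, X) takes it last.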
9185 unsigned FPArgNo = IsFPClass ? 0 : NumArgs - 1;
9186
9187  // We can count on all parameters preceding the floating-point argument
9188  // being plain ints, so convert each of them to int.
9189 for (unsigned i = 0; i < FPArgNo; ++i) {
9190 Expr *Arg = TheCall->getArg(Arg: i);
9191
9192 if (Arg->isTypeDependent())
9193 return false;
9194
9195 ExprResult Res = PerformImplicitConversion(Arg, Context.IntTy, AA_Passing);
9196
9197 if (Res.isInvalid())
9198 return true;
9199 TheCall->setArg(Arg: i, ArgExpr: Res.get());
9200 }
9201
9202 Expr *OrigArg = TheCall->getArg(Arg: FPArgNo);
9203
9204 if (OrigArg->isTypeDependent())
9205 return false;
9206
9207 // The usual unary conversions will convert half to float, which we want for
9208 // machines that use fp16 conversion intrinsics. Otherwise, we want to leave
9209 // the type as it is, but still perform the normal lvalue-to-rvalue conversions.
9210 if (Context.getTargetInfo().useFP16ConversionIntrinsics())
9211 OrigArg = UsualUnaryConversions(E: OrigArg).get();
9212 else
9213 OrigArg = DefaultFunctionArrayLvalueConversion(E: OrigArg).get();
9214 TheCall->setArg(Arg: FPArgNo, ArgExpr: OrigArg);
9215
9216 QualType VectorResultTy;
9217 QualType ElementTy = OrigArg->getType();
9218 // TODO: When all classification function are implemented with is_fpclass,
9219 // vector argument can be supported in all of them.
9220 if (ElementTy->isVectorType() && IsFPClass) {
9221 VectorResultTy = GetSignedVectorType(V: ElementTy);
9222 ElementTy = ElementTy->getAs<VectorType>()->getElementType();
9223 }
9224
9225 // This operation requires a non-_Complex floating-point number.
9226 if (!ElementTy->isRealFloatingType())
9227 return Diag(OrigArg->getBeginLoc(),
9228 diag::err_typecheck_call_invalid_unary_fp)
9229 << OrigArg->getType() << OrigArg->getSourceRange();
9230
9231 // __builtin_isfpclass has an integer parameter that specifies the test mask.
9232 // It is passed in (...), so it must be analyzed completely here.
9233 if (IsFPClass)
9234 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: llvm::fcAllFlags))
9235 return true;
9236
9237 // TODO: enable this code to all classification functions.
9238 if (IsFPClass) {
9239 QualType ResultTy;
9240 if (!VectorResultTy.isNull())
9241 ResultTy = VectorResultTy;
9242 else
9243 ResultTy = Context.IntTy;
9244 TheCall->setType(ResultTy);
9245 }
9246
9247 return false;
9248}
9249
9250/// Perform semantic analysis for a call to __builtin_complex.
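/// Illustrative only (hypothetical user code, not part of this file):
///   _Complex double z = __builtin_complex(1.0, 2.0);   // accepted
///   _Complex float  w = __builtin_complex(1.0f, 2.0);  // rejected: different types
///   _Complex int    v = __builtin_complex(1, 2);       // rejected: not real FP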
9251bool Sema::SemaBuiltinComplex(CallExpr *TheCall) {
9252 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9253 return true;
9254
9255 bool Dependent = false;
9256 for (unsigned I = 0; I != 2; ++I) {
9257 Expr *Arg = TheCall->getArg(Arg: I);
9258 QualType T = Arg->getType();
9259 if (T->isDependentType()) {
9260 Dependent = true;
9261 continue;
9262 }
9263
9264 // Despite supporting _Complex int, GCC requires a real floating point type
9265 // for the operands of __builtin_complex.
9266 if (!T->isRealFloatingType()) {
9267 return Diag(Arg->getBeginLoc(), diag::err_typecheck_call_requires_real_fp)
9268 << Arg->getType() << Arg->getSourceRange();
9269 }
9270
9271 ExprResult Converted = DefaultLvalueConversion(E: Arg);
9272 if (Converted.isInvalid())
9273 return true;
9274 TheCall->setArg(Arg: I, ArgExpr: Converted.get());
9275 }
9276
9277 if (Dependent) {
9278 TheCall->setType(Context.DependentTy);
9279 return false;
9280 }
9281
9282 Expr *Real = TheCall->getArg(Arg: 0);
9283 Expr *Imag = TheCall->getArg(Arg: 1);
9284 if (!Context.hasSameType(T1: Real->getType(), T2: Imag->getType())) {
9285 return Diag(Real->getBeginLoc(),
9286 diag::err_typecheck_call_different_arg_types)
9287 << Real->getType() << Imag->getType()
9288 << Real->getSourceRange() << Imag->getSourceRange();
9289 }
9290
9291 // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers;
9292 // don't allow this builtin to form those types either.
9293 // FIXME: Should we allow these types?
9294 if (Real->getType()->isFloat16Type())
9295 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
9296 << "_Float16";
9297 if (Real->getType()->isHalfType())
9298 return Diag(TheCall->getBeginLoc(), diag::err_invalid_complex_spec)
9299 << "half";
9300
9301 TheCall->setType(Context.getComplexType(T: Real->getType()));
9302 return false;
9303}
9304
9305// Customized Sema checking for VSX builtins that have the following signature:
9306// vector [...] builtinName(vector [...], vector [...], const int);
9307// i.e. builtins that take the same vector type (any legal vector type) for the
9308// first two arguments and a compile-time constant for the third argument.
9309// Example builtins are:
9310// vector double vec_xxpermdi(vector double, vector double, int);
9311// vector short vec_xxsldwi(vector short, vector short, int);
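//
// Illustrative only (hypothetical user code, not part of this file):
//   vector double r = vec_xxpermdi(a, b, 2);   // accepted: constant third argument
//   vector double s = vec_xxpermdi(a, b, n);   // rejected unless n is a constant
//   vector double t = vec_xxpermdi(a, c, 2);   // rejected if a and c differ in type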
9312bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
9313 unsigned ExpectedNumArgs = 3;
9314 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: ExpectedNumArgs))
9315 return true;
9316
9317 // Check the third argument is a compile time constant
9318 if (!TheCall->getArg(2)->isIntegerConstantExpr(Context))
9319 return Diag(TheCall->getBeginLoc(),
9320 diag::err_vsx_builtin_nonconstant_argument)
9321 << 3 /* argument index */ << TheCall->getDirectCallee()
9322 << SourceRange(TheCall->getArg(2)->getBeginLoc(),
9323 TheCall->getArg(2)->getEndLoc());
9324
9325 QualType Arg1Ty = TheCall->getArg(Arg: 0)->getType();
9326 QualType Arg2Ty = TheCall->getArg(Arg: 1)->getType();
9327
9328 // Check the type of argument 1 and argument 2 are vectors.
9329 SourceLocation BuiltinLoc = TheCall->getBeginLoc();
9330 if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
9331 (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
9332 return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
9333 << TheCall->getDirectCallee()
9334 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9335 TheCall->getArg(1)->getEndLoc());
9336 }
9337
9338 // Check the first two arguments are the same type.
9339 if (!Context.hasSameUnqualifiedType(T1: Arg1Ty, T2: Arg2Ty)) {
9340 return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
9341 << TheCall->getDirectCallee()
9342 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9343 TheCall->getArg(1)->getEndLoc());
9344 }
9345
9346 // When default clang type checking is turned off and the customized type
9347 // checking is used, the return type of the call must be set explicitly.
9348 // Otherwise it defaults to _Bool.
9349 TheCall->setType(Arg1Ty);
9350
9351 return false;
9352}
9353
9354/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
9355// This is declared to take (...), so we have to check everything.
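//
// Illustrative only (hypothetical user code, not part of this file):
//   typedef int v4si __attribute__((vector_size(16)));
//   v4si rev(v4si a)         { return __builtin_shufflevector(a, a, 3, 2, 1, 0); }
//   v4si mix(v4si a, v4si b) { return __builtin_shufflevector(a, b, 0, 4, 1, 5); }
// Each index must be a constant in [0, 2*N-1], or -1 for an undefined element.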
9356ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
9357 if (TheCall->getNumArgs() < 2)
9358 return ExprError(Diag(TheCall->getEndLoc(),
9359 diag::err_typecheck_call_too_few_args_at_least)
9360 << 0 /*function call*/ << 2 << TheCall->getNumArgs()
9361 << /*is non object*/ 0 << TheCall->getSourceRange());
9362
9363 // Determine which of the following types of shufflevector we're checking:
9364 // 1) unary, vector mask: (lhs, mask)
9365 // 2) binary, scalar mask: (lhs, rhs, index, ..., index)
9366 QualType resType = TheCall->getArg(Arg: 0)->getType();
9367 unsigned numElements = 0;
9368
9369 if (!TheCall->getArg(Arg: 0)->isTypeDependent() &&
9370 !TheCall->getArg(Arg: 1)->isTypeDependent()) {
9371 QualType LHSType = TheCall->getArg(Arg: 0)->getType();
9372 QualType RHSType = TheCall->getArg(Arg: 1)->getType();
9373
9374 if (!LHSType->isVectorType() || !RHSType->isVectorType())
9375 return ExprError(
9376 Diag(TheCall->getBeginLoc(), diag::err_vec_builtin_non_vector)
9377 << TheCall->getDirectCallee()
9378 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9379 TheCall->getArg(1)->getEndLoc()));
9380
9381 numElements = LHSType->castAs<VectorType>()->getNumElements();
9382 unsigned numResElements = TheCall->getNumArgs() - 2;
9383
9384 // Check to see if we have a call with 2 vector arguments, the unary shuffle
9385 // with mask. If so, verify that the RHS is an integer vector type with the
9386 // same number of elements as the LHS.
9387 if (TheCall->getNumArgs() == 2) {
9388 if (!RHSType->hasIntegerRepresentation() ||
9389 RHSType->castAs<VectorType>()->getNumElements() != numElements)
9390 return ExprError(Diag(TheCall->getBeginLoc(),
9391 diag::err_vec_builtin_incompatible_vector)
9392 << TheCall->getDirectCallee()
9393 << SourceRange(TheCall->getArg(1)->getBeginLoc(),
9394 TheCall->getArg(1)->getEndLoc()));
9395 } else if (!Context.hasSameUnqualifiedType(T1: LHSType, T2: RHSType)) {
9396 return ExprError(Diag(TheCall->getBeginLoc(),
9397 diag::err_vec_builtin_incompatible_vector)
9398 << TheCall->getDirectCallee()
9399 << SourceRange(TheCall->getArg(0)->getBeginLoc(),
9400 TheCall->getArg(1)->getEndLoc()));
9401 } else if (numElements != numResElements) {
9402 QualType eltType = LHSType->castAs<VectorType>()->getElementType();
9403 resType =
9404 Context.getVectorType(VectorType: eltType, NumElts: numResElements, VecKind: VectorKind::Generic);
9405 }
9406 }
9407
9408 for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
9409 if (TheCall->getArg(Arg: i)->isTypeDependent() ||
9410 TheCall->getArg(Arg: i)->isValueDependent())
9411 continue;
9412
9413 std::optional<llvm::APSInt> Result;
9414 if (!(Result = TheCall->getArg(i)->getIntegerConstantExpr(Context)))
9415 return ExprError(Diag(TheCall->getBeginLoc(),
9416 diag::err_shufflevector_nonconstant_argument)
9417 << TheCall->getArg(i)->getSourceRange());
9418
9419 // Allow -1 which will be translated to undef in the IR.
9420 if (Result->isSigned() && Result->isAllOnes())
9421 continue;
9422
9423 if (Result->getActiveBits() > 64 ||
9424 Result->getZExtValue() >= numElements * 2)
9425 return ExprError(Diag(TheCall->getBeginLoc(),
9426 diag::err_shufflevector_argument_too_large)
9427 << TheCall->getArg(i)->getSourceRange());
9428 }
9429
9430 SmallVector<Expr*, 32> exprs;
9431
9432 for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
9433 exprs.push_back(Elt: TheCall->getArg(Arg: i));
9434 TheCall->setArg(Arg: i, ArgExpr: nullptr);
9435 }
9436
9437 return new (Context) ShuffleVectorExpr(Context, exprs, resType,
9438 TheCall->getCallee()->getBeginLoc(),
9439 TheCall->getRParenLoc());
9440}
9441
9442/// SemaConvertVectorExpr - Handle __builtin_convertvector
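///
/// Illustrative only (hypothetical user code, not part of this file):
///   typedef float v4f __attribute__((vector_size(16)));
///   typedef int   v4i __attribute__((vector_size(16)));
///   v4i trunc(v4f x) { return __builtin_convertvector(x, v4i); }   // accepted
/// Both operands must be vector types with the same number of elements.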
9443ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
9444 SourceLocation BuiltinLoc,
9445 SourceLocation RParenLoc) {
9446 ExprValueKind VK = VK_PRValue;
9447 ExprObjectKind OK = OK_Ordinary;
9448 QualType DstTy = TInfo->getType();
9449 QualType SrcTy = E->getType();
9450
9451 if (!SrcTy->isVectorType() && !SrcTy->isDependentType())
9452 return ExprError(Diag(BuiltinLoc,
9453 diag::err_convertvector_non_vector)
9454 << E->getSourceRange());
9455 if (!DstTy->isVectorType() && !DstTy->isDependentType())
9456 return ExprError(Diag(BuiltinLoc, diag::err_builtin_non_vector_type)
9457 << "second"
9458 << "__builtin_convertvector");
9459
9460 if (!SrcTy->isDependentType() && !DstTy->isDependentType()) {
9461 unsigned SrcElts = SrcTy->castAs<VectorType>()->getNumElements();
9462 unsigned DstElts = DstTy->castAs<VectorType>()->getNumElements();
9463 if (SrcElts != DstElts)
9464 return ExprError(Diag(BuiltinLoc,
9465 diag::err_convertvector_incompatible_vector)
9466 << E->getSourceRange());
9467 }
9468
9469 return new (Context)
9470 ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc);
9471}
9472
9473/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
9474// This is declared to take (const void*, ...) and can take two
9475// optional constant int args.
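//
// Illustrative only (hypothetical user code, not part of this file):
//   __builtin_prefetch(p);          // accepted
//   __builtin_prefetch(p, 1, 3);    // accepted: rw in [0,1], locality in [0,3]
//   __builtin_prefetch(p, 2);       // rejected: rw argument out of range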
9476bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
9477 unsigned NumArgs = TheCall->getNumArgs();
9478
9479 if (NumArgs > 3)
9480 return Diag(TheCall->getEndLoc(),
9481 diag::err_typecheck_call_too_many_args_at_most)
9482 << 0 /*function call*/ << 3 << NumArgs << /*is non object*/ 0
9483 << TheCall->getSourceRange();
9484
9485 // Argument 0 is checked for us and the remaining arguments must be
9486 // constant integers.
9487 for (unsigned i = 1; i != NumArgs; ++i)
9488 if (SemaBuiltinConstantArgRange(TheCall, ArgNum: i, Low: 0, High: i == 1 ? 1 : 3))
9489 return true;
9490
9491 return false;
9492}
9493
9494/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
9495bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
9496 if (!Context.getTargetInfo().checkArithmeticFenceSupported())
9497 return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
9498 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
9499 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
9500 return true;
9501 Expr *Arg = TheCall->getArg(Arg: 0);
9502 if (Arg->isInstantiationDependent())
9503 return false;
9504
9505 QualType ArgTy = Arg->getType();
9506 if (!ArgTy->hasFloatingRepresentation())
9507 return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
9508 << ArgTy;
9509 if (Arg->isLValue()) {
9510 ExprResult FirstArg = DefaultLvalueConversion(E: Arg);
9511 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
9512 }
9513 TheCall->setType(TheCall->getArg(Arg: 0)->getType());
9514 return false;
9515}
9516
9517/// SemaBuiltinAssume - Handle __assume (MS Extension).
9518// __assume does not evaluate its argument, and we should warn if that
9519// argument has side effects.
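//
// Illustrative only (hypothetical user code, not part of this file):
//   __assume(idx < size);       // accepted
//   __assume(consume() == 0);   // warns: the side effects are not evaluated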
9520bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
9521 Expr *Arg = TheCall->getArg(Arg: 0);
9522 if (Arg->isInstantiationDependent()) return false;
9523
9524 if (Arg->HasSideEffects(Context))
9525 Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects)
9526 << Arg->getSourceRange()
9527 << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier();
9528
9529 return false;
9530}
9531
9532/// Handle __builtin_alloca_with_align. This is declared
9533/// as (size_t, size_t) where the second size_t must be a constant power of 2
9534/// no smaller than 8; the alignment is specified in bits.
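///
/// Illustrative only (hypothetical user code, not part of this file):
///   void *p = __builtin_alloca_with_align(n, 64);   // accepted: constant power of 2
///   void *q = __builtin_alloca_with_align(n, 48);   // rejected: not a power of 2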
9535bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
9536 // The alignment must be a constant integer.
9537 Expr *Arg = TheCall->getArg(Arg: 1);
9538
9539 // We can't check the value of a dependent argument.
9540 if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
9541 if (const auto *UE =
9542 dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
9543 if (UE->getKind() == UETT_AlignOf ||
9544 UE->getKind() == UETT_PreferredAlignOf)
9545 Diag(TheCall->getBeginLoc(), diag::warn_alloca_align_alignof)
9546 << Arg->getSourceRange();
9547
9548 llvm::APSInt Result = Arg->EvaluateKnownConstInt(Ctx: Context);
9549
9550 if (!Result.isPowerOf2())
9551 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
9552 << Arg->getSourceRange();
9553
9554 if (Result < Context.getCharWidth())
9555 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_small)
9556 << (unsigned)Context.getCharWidth() << Arg->getSourceRange();
9557
9558 if (Result > std::numeric_limits<int32_t>::max())
9559 return Diag(TheCall->getBeginLoc(), diag::err_alignment_too_big)
9560 << std::numeric_limits<int32_t>::max() << Arg->getSourceRange();
9561 }
9562
9563 return false;
9564}
9565
9566/// Handle __builtin_assume_aligned. This is declared
9567/// as (const void*, size_t, ...) and can take one optional constant int arg.
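///
/// Illustrative only (hypothetical user code, not part of this file):
///   p = __builtin_assume_aligned(p, 64);         // accepted
///   p = __builtin_assume_aligned(p, 64, off);    // accepted: optional offset
///   p = __builtin_assume_aligned(p, 48);         // rejected: not a power of 2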
9568bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
9569 if (checkArgCountRange(S&: *this, Call: TheCall, MinArgCount: 2, MaxArgCount: 3))
9570 return true;
9571
9572 unsigned NumArgs = TheCall->getNumArgs();
9573 Expr *FirstArg = TheCall->getArg(Arg: 0);
9574
9575 {
9576 ExprResult FirstArgResult =
9577 DefaultFunctionArrayLvalueConversion(E: FirstArg);
9578 if (checkBuiltinArgument(S&: *this, E: TheCall, ArgIndex: 0))
9579 return true;
9580 // The in-place update of FirstArg performed by checkBuiltinArgument is ignored.
9581 TheCall->setArg(Arg: 0, ArgExpr: FirstArgResult.get());
9582 }
9583
9584 // The alignment must be a constant integer.
9585 Expr *SecondArg = TheCall->getArg(Arg: 1);
9586
9587 // We can't check the value of a dependent argument.
9588 if (!SecondArg->isValueDependent()) {
9589 llvm::APSInt Result;
9590 if (SemaBuiltinConstantArg(TheCall, ArgNum: 1, Result))
9591 return true;
9592
9593 if (!Result.isPowerOf2())
9594 return Diag(TheCall->getBeginLoc(), diag::err_alignment_not_power_of_two)
9595 << SecondArg->getSourceRange();
9596
9597 if (Result > Sema::MaximumAlignment)
9598 Diag(TheCall->getBeginLoc(), diag::warn_assume_aligned_too_great)
9599 << SecondArg->getSourceRange() << Sema::MaximumAlignment;
9600 }
9601
9602 if (NumArgs > 2) {
9603 Expr *ThirdArg = TheCall->getArg(Arg: 2);
9604 if (convertArgumentToType(S&: *this, Value&: ThirdArg, Ty: Context.getSizeType()))
9605 return true;
9606 TheCall->setArg(Arg: 2, ArgExpr: ThirdArg);
9607 }
9608
9609 return false;
9610}
9611
9612bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
9613 unsigned BuiltinID =
9614 cast<FunctionDecl>(Val: TheCall->getCalleeDecl())->getBuiltinID();
9615 bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
9616
9617 unsigned NumArgs = TheCall->getNumArgs();
9618 unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
9619 if (NumArgs < NumRequiredArgs) {
9620 return Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args)
9621 << 0 /* function call */ << NumRequiredArgs << NumArgs
9622 << /*is non object*/ 0 << TheCall->getSourceRange();
9623 }
9624 if (NumArgs >= NumRequiredArgs + 0x100) {
9625 return Diag(TheCall->getEndLoc(),
9626 diag::err_typecheck_call_too_many_args_at_most)
9627 << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
9628 << /*is non object*/ 0 << TheCall->getSourceRange();
9629 }
9630 unsigned i = 0;
9631
9632 // For formatting call, check buffer arg.
9633 if (!IsSizeCall) {
9634 ExprResult Arg(TheCall->getArg(Arg: i));
9635 InitializedEntity Entity = InitializedEntity::InitializeParameter(
9636 Context, Context.VoidPtrTy, false);
9637 Arg = PerformCopyInitialization(Entity, EqualLoc: SourceLocation(), Init: Arg);
9638 if (Arg.isInvalid())
9639 return true;
9640 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
9641 i++;
9642 }
9643
9644 // Check string literal arg.
9645 unsigned FormatIdx = i;
9646 {
9647 ExprResult Arg = CheckOSLogFormatStringArg(Arg: TheCall->getArg(Arg: i));
9648 if (Arg.isInvalid())
9649 return true;
9650 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
9651 i++;
9652 }
9653
9654 // Make sure variadic args are scalar.
9655 unsigned FirstDataArg = i;
9656 while (i < NumArgs) {
9657 ExprResult Arg = DefaultVariadicArgumentPromotion(
9658 E: TheCall->getArg(Arg: i), CT: VariadicFunction, FDecl: nullptr);
9659 if (Arg.isInvalid())
9660 return true;
9661 CharUnits ArgSize = Context.getTypeSizeInChars(T: Arg.get()->getType());
9662 if (ArgSize.getQuantity() >= 0x100) {
9663 return Diag(Arg.get()->getEndLoc(), diag::err_os_log_argument_too_big)
9664 << i << (int)ArgSize.getQuantity() << 0xff
9665 << TheCall->getSourceRange();
9666 }
9667 TheCall->setArg(Arg: i, ArgExpr: Arg.get());
9668 i++;
9669 }
9670
9671 // Check formatting specifiers. NOTE: We're only doing this for the non-size
9672 // call to avoid duplicate diagnostics.
9673 if (!IsSizeCall) {
9674 llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
9675 ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
9676 bool Success = CheckFormatArguments(
9677 Args, FAPK: FAPK_Variadic, format_idx: FormatIdx, firstDataArg: FirstDataArg, Type: FST_OSLog,
9678 CallType: VariadicFunction, Loc: TheCall->getBeginLoc(), range: SourceRange(),
9679 CheckedVarArgs);
9680 if (!Success)
9681 return true;
9682 }
9683
9684 if (IsSizeCall) {
9685 TheCall->setType(Context.getSizeType());
9686 } else {
9687 TheCall->setType(Context.VoidPtrTy);
9688 }
9689 return false;
9690}
9691
9692/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
9693/// TheCall is a constant expression.
9694bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
9695 llvm::APSInt &Result) {
9696 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9697 DeclRefExpr *DRE =cast<DeclRefExpr>(Val: TheCall->getCallee()->IgnoreParenCasts());
9698 FunctionDecl *FDecl = cast<FunctionDecl>(Val: DRE->getDecl());
9699
9700 if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
9701
9702 std::optional<llvm::APSInt> R;
9703 if (!(R = Arg->getIntegerConstantExpr(Context)))
9704 return Diag(TheCall->getBeginLoc(), diag::err_constant_integer_arg_type)
9705 << FDecl->getDeclName() << Arg->getSourceRange();
9706 Result = *R;
9707 return false;
9708}
9709
9710/// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr
9711/// TheCall is a constant expression in the range [Low, High].
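///
/// A typical (illustrative) use from a target-specific checker, assuming a
/// hypothetical builtin whose third argument must be an 8-bit immediate:
///   case X86::BI__builtin_ia32_example:   // hypothetical builtin ID
///     return SemaBuiltinConstantArgRange(TheCall, 2, 0, 255);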
9712bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
9713 int Low, int High, bool RangeIsError) {
9714 if (isConstantEvaluatedContext())
9715 return false;
9716 llvm::APSInt Result;
9717
9718 // We can't check the value of a dependent argument.
9719 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9720 if (Arg->isTypeDependent() || Arg->isValueDependent())
9721 return false;
9722
9723 // Check constant-ness first.
9724 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9725 return true;
9726
9727 if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
9728 if (RangeIsError)
9729 return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
9730 << toString(Result, 10) << Low << High << Arg->getSourceRange();
9731 else
9732 // Defer the warning until we know if the code will be emitted so that
9733 // dead code can ignore this.
9734 DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
9735 PDiag(diag::warn_argument_invalid_range)
9736 << toString(Result, 10) << Low << High
9737 << Arg->getSourceRange());
9738 }
9739
9740 return false;
9741}
9742
9743/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr
9744/// TheCall is a constant expression that is a multiple of Num.
9745bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
9746 unsigned Num) {
9747 llvm::APSInt Result;
9748
9749 // We can't check the value of a dependent argument.
9750 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9751 if (Arg->isTypeDependent() || Arg->isValueDependent())
9752 return false;
9753
9754 // Check constant-ness first.
9755 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9756 return true;
9757
9758 if (Result.getSExtValue() % Num != 0)
9759 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_multiple)
9760 << Num << Arg->getSourceRange();
9761
9762 return false;
9763}
9764
9765/// SemaBuiltinConstantArgPower2 - Check if argument ArgNum of TheCall is a
9766/// constant expression representing a power of 2.
9767bool Sema::SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum) {
9768 llvm::APSInt Result;
9769
9770 // We can't check the value of a dependent argument.
9771 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9772 if (Arg->isTypeDependent() || Arg->isValueDependent())
9773 return false;
9774
9775 // Check constant-ness first.
9776 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9777 return true;
9778
9779 // Bit-twiddling to test for a power of 2: for x > 0, x & (x-1) is zero if
9780 // and only if x is a power of 2.
9781 if (Result.isStrictlyPositive() && (Result & (Result - 1)) == 0)
9782 return false;
9783
9784 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_power_of_2)
9785 << Arg->getSourceRange();
9786}
9787
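// A "shifted byte" is a single byte value shifted left by a multiple of 8 bits.
// Worked example (illustrative): 0x00AB0000 qualifies (0xAB << 16), whereas
// 0x0001AB00 does not, because after shifting out the low zero byte the
// remaining value 0x0001AB still has bits set outside the bottom byte.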
9788static bool IsShiftedByte(llvm::APSInt Value) {
9789 if (Value.isNegative())
9790 return false;
9791
9792 // Check if it's a shifted byte, by shifting it down
9793 while (true) {
9794 // If the value fits in the bottom byte, the check passes.
9795 if (Value < 0x100)
9796 return true;
9797
9798 // Otherwise, if the value has _any_ bits in the bottom byte, the check
9799 // fails.
9800 if ((Value & 0xFF) != 0)
9801 return false;
9802
9803 // If the bottom 8 bits are all 0, but something above that is nonzero,
9804 // then shifting the value right by 8 bits won't affect whether it's a
9805 // shifted byte or not. So do that, and go round again.
9806 Value >>= 8;
9807 }
9808}
9809
9810/// SemaBuiltinConstantArgShiftedByte - Check if argument ArgNum of TheCall is
9811/// a constant expression representing an arbitrary byte value shifted left by
9812/// a multiple of 8 bits.
9813bool Sema::SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
9814 unsigned ArgBits) {
9815 llvm::APSInt Result;
9816
9817 // We can't check the value of a dependent argument.
9818 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9819 if (Arg->isTypeDependent() || Arg->isValueDependent())
9820 return false;
9821
9822 // Check constant-ness first.
9823 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9824 return true;
9825
9826 // Truncate to the given size.
9827 Result = Result.getLoBits(numBits: ArgBits);
9828 Result.setIsUnsigned(true);
9829
9830 if (IsShiftedByte(Value: Result))
9831 return false;
9832
9833 return Diag(TheCall->getBeginLoc(), diag::err_argument_not_shifted_byte)
9834 << Arg->getSourceRange();
9835}
9836
9837/// SemaBuiltinConstantArgShiftedByteOr0xFF - Check if argument ArgNum of
9838/// TheCall is a constant expression representing either a shifted byte value,
9839/// or a value of the form 0x??FF (i.e. a member of the arithmetic progression
9840/// 0x00FF, 0x01FF, ..., 0xFFFF). This strange range check is needed for some
9841/// Arm MVE intrinsics.
9842bool Sema::SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall,
9843 int ArgNum,
9844 unsigned ArgBits) {
9845 llvm::APSInt Result;
9846
9847 // We can't check the value of a dependent argument.
9848 Expr *Arg = TheCall->getArg(Arg: ArgNum);
9849 if (Arg->isTypeDependent() || Arg->isValueDependent())
9850 return false;
9851
9852 // Check constant-ness first.
9853 if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
9854 return true;
9855
9856 // Truncate to the given size.
9857 Result = Result.getLoBits(numBits: ArgBits);
9858 Result.setIsUnsigned(true);
9859
9860 // Check to see if it's in either of the required forms.
9861 if (IsShiftedByte(Value: Result) ||
9862 (Result > 0 && Result < 0x10000 && (Result & 0xFF) == 0xFF))
9863 return false;
9864
9865 return Diag(TheCall->getBeginLoc(),
9866 diag::err_argument_not_shifted_byte_or_xxff)
9867 << Arg->getSourceRange();
9868}
9869
9870/// SemaBuiltinARMMemoryTaggingCall - Handle calls to the memory tagging extensions.
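///
/// Illustrative only (hypothetical user code, not part of this file):
///   int *q = __builtin_arm_irg(p, mask);   // result type follows the pointer arg
///   __builtin_arm_stg(q);                  // single pointer argument
///   int *r = __builtin_arm_addg(q, 15);    // second argument: constant in [0, 15]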
9871bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) {
9872 if (BuiltinID == AArch64::BI__builtin_arm_irg) {
9873 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9874 return true;
9875 Expr *Arg0 = TheCall->getArg(Arg: 0);
9876 Expr *Arg1 = TheCall->getArg(Arg: 1);
9877
9878 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
9879 if (FirstArg.isInvalid())
9880 return true;
9881 QualType FirstArgType = FirstArg.get()->getType();
9882 if (!FirstArgType->isAnyPointerType())
9883 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
9884 << "first" << FirstArgType << Arg0->getSourceRange();
9885 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
9886
9887 ExprResult SecArg = DefaultLvalueConversion(E: Arg1);
9888 if (SecArg.isInvalid())
9889 return true;
9890 QualType SecArgType = SecArg.get()->getType();
9891 if (!SecArgType->isIntegerType())
9892 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
9893 << "second" << SecArgType << Arg1->getSourceRange();
9894
9895 // Derive the return type from the pointer argument.
9896 TheCall->setType(FirstArgType);
9897 return false;
9898 }
9899
9900 if (BuiltinID == AArch64::BI__builtin_arm_addg) {
9901 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9902 return true;
9903
9904 Expr *Arg0 = TheCall->getArg(Arg: 0);
9905 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
9906 if (FirstArg.isInvalid())
9907 return true;
9908 QualType FirstArgType = FirstArg.get()->getType();
9909 if (!FirstArgType->isAnyPointerType())
9910 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
9911 << "first" << FirstArgType << Arg0->getSourceRange();
9912 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
9913
9914 // Derive the return type from the pointer argument.
9915 TheCall->setType(FirstArgType);
9916
9917 // The second argument must be a constant in the range [0, 15].
9918 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: 15);
9919 }
9920
9921 if (BuiltinID == AArch64::BI__builtin_arm_gmi) {
9922 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 2))
9923 return true;
9924 Expr *Arg0 = TheCall->getArg(Arg: 0);
9925 Expr *Arg1 = TheCall->getArg(Arg: 1);
9926
9927 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
9928 if (FirstArg.isInvalid())
9929 return true;
9930 QualType FirstArgType = FirstArg.get()->getType();
9931 if (!FirstArgType->isAnyPointerType())
9932 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
9933 << "first" << FirstArgType << Arg0->getSourceRange();
9934
9935 QualType SecArgType = Arg1->getType();
9936 if (!SecArgType->isIntegerType())
9937 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer)
9938 << "second" << SecArgType << Arg1->getSourceRange();
9939 TheCall->setType(Context.IntTy);
9940 return false;
9941 }
9942
9943 if (BuiltinID == AArch64::BI__builtin_arm_ldg ||
9944 BuiltinID == AArch64::BI__builtin_arm_stg) {
9945 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: 1))
9946 return true;
9947 Expr *Arg0 = TheCall->getArg(Arg: 0);
9948 ExprResult FirstArg = DefaultFunctionArrayLvalueConversion(E: Arg0);
9949 if (FirstArg.isInvalid())
9950 return true;
9951
9952 QualType FirstArgType = FirstArg.get()->getType();
9953 if (!FirstArgType->isAnyPointerType())
9954 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer)
9955 << "first" << FirstArgType << Arg0->getSourceRange();
9956 TheCall->setArg(Arg: 0, ArgExpr: FirstArg.get());
9957
9958 // Derive the return type from the pointer argument.
9959 if (BuiltinID == AArch64::BI__builtin_arm_ldg)
9960 TheCall->setType(FirstArgType);
9961 return false;
9962 }
9963
9964 if (BuiltinID == AArch64::BI__builtin_arm_subp) {
9965 Expr *ArgA = TheCall->getArg(Arg: 0);
9966 Expr *ArgB = TheCall->getArg(Arg: 1);
9967
9968 ExprResult ArgExprA = DefaultFunctionArrayLvalueConversion(E: ArgA);
9969 ExprResult ArgExprB = DefaultFunctionArrayLvalueConversion(E: ArgB);
9970
9971 if (ArgExprA.isInvalid() || ArgExprB.isInvalid())
9972 return true;
9973
9974 QualType ArgTypeA = ArgExprA.get()->getType();
9975 QualType ArgTypeB = ArgExprB.get()->getType();
9976
9977 auto isNull = [&] (Expr *E) -> bool {
9978 return E->isNullPointerConstant(
9979 Ctx&: Context, NPC: Expr::NPC_ValueDependentIsNotNull); };
9980
9981 // Each argument should be either a pointer or a null pointer constant.
9982 if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA))
9983 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
9984 << "first" << ArgTypeA << ArgA->getSourceRange();
9985
9986 if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB))
9987 return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer)
9988 << "second" << ArgTypeB << ArgB->getSourceRange();
9989
9990 // Ensure the pointee types are compatible.
9991 if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) &&
9992 ArgTypeB->isAnyPointerType() && !isNull(ArgB)) {
9993 QualType pointeeA = ArgTypeA->getPointeeType();
9994 QualType pointeeB = ArgTypeB->getPointeeType();
9995 if (!Context.typesAreCompatible(
9996 T1: Context.getCanonicalType(T: pointeeA).getUnqualifiedType(),
9997 T2: Context.getCanonicalType(T: pointeeB).getUnqualifiedType())) {
9998 return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible)
9999 << ArgTypeA << ArgTypeB << ArgA->getSourceRange()
10000 << ArgB->getSourceRange();
10001 }
10002 }
10003
10004 // At least one argument must be of pointer type.
10005 if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType())
10006 return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer)
10007 << ArgTypeA << ArgTypeB << ArgA->getSourceRange();
10008
10009 if (isNull(ArgA)) // adopt type of the other pointer
10010 ArgExprA = ImpCastExprToType(E: ArgExprA.get(), Type: ArgTypeB, CK: CK_NullToPointer);
10011
10012 if (isNull(ArgB))
10013 ArgExprB = ImpCastExprToType(E: ArgExprB.get(), Type: ArgTypeA, CK: CK_NullToPointer);
10014
10015 TheCall->setArg(Arg: 0, ArgExpr: ArgExprA.get());
10016 TheCall->setArg(Arg: 1, ArgExpr: ArgExprB.get());
10017 TheCall->setType(Context.LongLongTy);
10018 return false;
10019 }
10020 assert(false && "Unhandled ARM MTE intrinsic");
10021 return true;
10022}
10023
10024/// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr
10025/// TheCall is an ARM/AArch64 special register string literal.
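///
/// Illustrative only (hypothetical user code, not part of this file):
///   unsigned long v = __builtin_arm_rsr64("1:3:7:15:5");   // colon-separated form
///   __builtin_arm_wsr("spsel", 1);                // named PSTATE, constant in range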
10026bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
10027 int ArgNum, unsigned ExpectedFieldNum,
10028 bool AllowName) {
10029 bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
10030 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
10031 BuiltinID == ARM::BI__builtin_arm_rsr ||
10032 BuiltinID == ARM::BI__builtin_arm_rsrp ||
10033 BuiltinID == ARM::BI__builtin_arm_wsr ||
10034 BuiltinID == ARM::BI__builtin_arm_wsrp;
10035 bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
10036 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
10037 BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
10038 BuiltinID == AArch64::BI__builtin_arm_wsr128 ||
10039 BuiltinID == AArch64::BI__builtin_arm_rsr ||
10040 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
10041 BuiltinID == AArch64::BI__builtin_arm_wsr ||
10042 BuiltinID == AArch64::BI__builtin_arm_wsrp;
10043 assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin.");
10044
10045 // We can't check the value of a dependent argument.
10046 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10047 if (Arg->isTypeDependent() || Arg->isValueDependent())
10048 return false;
10049
10050 // Check if the argument is a string literal.
10051 if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts()))
10052 return Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
10053 << Arg->getSourceRange();
10054
10055 // Check the type of special register given.
10056 StringRef Reg = cast<StringLiteral>(Val: Arg->IgnoreParenImpCasts())->getString();
10057 SmallVector<StringRef, 6> Fields;
10058 Reg.split(A&: Fields, Separator: ":");
10059
10060 if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1))
10061 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
10062 << Arg->getSourceRange();
10063
10064 // If the string is the name of a register then we cannot check that it is
10065 // valid here, but if the string is in one of the forms described in the ACLE
10066 // then we can check that the supplied fields are integers and within the
10067 // valid ranges.
10068 if (Fields.size() > 1) {
10069 bool FiveFields = Fields.size() == 5;
10070
10071 bool ValidString = true;
10072 if (IsARMBuiltin) {
10073 ValidString &= Fields[0].starts_with_insensitive(Prefix: "cp") ||
10074 Fields[0].starts_with_insensitive(Prefix: "p");
10075 if (ValidString)
10076 Fields[0] = Fields[0].drop_front(
10077 N: Fields[0].starts_with_insensitive(Prefix: "cp") ? 2 : 1);
10078
10079 ValidString &= Fields[2].starts_with_insensitive(Prefix: "c");
10080 if (ValidString)
10081 Fields[2] = Fields[2].drop_front(N: 1);
10082
10083 if (FiveFields) {
10084 ValidString &= Fields[3].starts_with_insensitive(Prefix: "c");
10085 if (ValidString)
10086 Fields[3] = Fields[3].drop_front(N: 1);
10087 }
10088 }
10089
10090 SmallVector<int, 5> Ranges;
10091 if (FiveFields)
10092 Ranges.append(IL: {IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
10093 else
10094 Ranges.append(IL: {15, 7, 15});
10095
10096 for (unsigned i=0; i<Fields.size(); ++i) {
10097 int IntField;
10098 ValidString &= !Fields[i].getAsInteger(Radix: 10, Result&: IntField);
10099 ValidString &= (IntField >= 0 && IntField <= Ranges[i]);
10100 }
10101
10102 if (!ValidString)
10103 return Diag(TheCall->getBeginLoc(), diag::err_arm_invalid_specialreg)
10104 << Arg->getSourceRange();
10105 } else if (IsAArch64Builtin && Fields.size() == 1) {
10106 // This code validates writes to PSTATE registers.
10107
10108 // Not a write.
10109 if (TheCall->getNumArgs() != 2)
10110 return false;
10111
10112 // The 128-bit system register accesses do not touch PSTATE.
10113 if (BuiltinID == AArch64::BI__builtin_arm_rsr128 ||
10114 BuiltinID == AArch64::BI__builtin_arm_wsr128)
10115 return false;
10116
10117 // These are the named PSTATE accesses using "MSR (immediate)" instructions,
10118 // along with the upper limit on the immediates allowed.
10119 auto MaxLimit = llvm::StringSwitch<std::optional<unsigned>>(Reg)
10120 .CaseLower(S: "spsel", Value: 15)
10121 .CaseLower(S: "daifclr", Value: 15)
10122 .CaseLower(S: "daifset", Value: 15)
10123 .CaseLower(S: "pan", Value: 15)
10124 .CaseLower(S: "uao", Value: 15)
10125 .CaseLower(S: "dit", Value: 15)
10126 .CaseLower(S: "ssbs", Value: 15)
10127 .CaseLower(S: "tco", Value: 15)
10128 .CaseLower(S: "allint", Value: 1)
10129 .CaseLower(S: "pm", Value: 1)
10130 .Default(Value: std::nullopt);
10131
10132 // If this is not a named PSTATE, just continue without validating, as this
10133 // will be lowered to an "MSR (register)" instruction directly.
10134 if (!MaxLimit)
10135 return false;
10136
10137 // Here we only allow constants in the range for that pstate, as required by
10138 // the ACLE.
10139 //
10140 // While clang also accepts the names of system registers in its ACLE
10141 // intrinsics, we disallow that for the PSTATE names used by MSR (immediate),
10142 // because the value written via a register differs from the value used as an
10143 // immediate to achieve the same effect. e.g., for the instruction `msr tco,
10144 // x0`, it is bit 25 of register x0 that is written into PSTATE.TCO, but
10145 // with `msr tco, #imm`, it is bit 0 of xN that is written into PSTATE.TCO.
10146 //
10147 // If a programmer wants to codegen the MSR (register) form of `msr tco,
10148 // xN`, they can still do so by specifying the register using five
10149 // colon-separated numbers in a string.
10150 return SemaBuiltinConstantArgRange(TheCall, ArgNum: 1, Low: 0, High: *MaxLimit);
10151 }
10152
10153 return false;
10154}
10155
10156/// SemaBuiltinPPCMMACall - Check the call to a PPC MMA builtin for validity.
10157/// Emit an error and return true on failure; return false on success.
10158/// TypeStr is a string containing the type descriptor of the value returned by
10159/// the builtin and the descriptors of the expected types of the arguments.
10160bool Sema::SemaBuiltinPPCMMACall(CallExpr *TheCall, unsigned BuiltinID,
10161 const char *TypeStr) {
10162
10163 assert((TypeStr[0] != '\0') &&
10164 "Invalid types in PPC MMA builtin declaration");
10165
10166 unsigned Mask = 0;
10167 unsigned ArgNum = 0;
10168
10169 // The first type in TypeStr is the type of the value returned by the
10170 // builtin. So we first read that type and change the type of TheCall.
10171 QualType type = DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10172 TheCall->setType(type);
10173
10174 while (*TypeStr != '\0') {
10175 Mask = 0;
10176 QualType ExpectedType = DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10177 if (ArgNum >= TheCall->getNumArgs()) {
10178 ArgNum++;
10179 break;
10180 }
10181
10182 Expr *Arg = TheCall->getArg(Arg: ArgNum);
10183 QualType PassedType = Arg->getType();
10184 QualType StrippedRVType = PassedType.getCanonicalType();
10185
10186 // Strip Restrict/Volatile qualifiers.
10187 if (StrippedRVType.isRestrictQualified() ||
10188 StrippedRVType.isVolatileQualified())
10189 StrippedRVType = StrippedRVType.getCanonicalType().getUnqualifiedType();
10190
10191 // The only case where the argument type and expected type are allowed to
10192 // mismatch is if the argument type is a non-void pointer (or array) and
10193 // expected type is a void pointer.
10194 if (StrippedRVType != ExpectedType)
10195 if (!(ExpectedType->isVoidPointerType() &&
10196 (StrippedRVType->isPointerType() || StrippedRVType->isArrayType())))
10197 return Diag(Arg->getBeginLoc(),
10198 diag::err_typecheck_convert_incompatible)
10199 << PassedType << ExpectedType << 1 << 0 << 0;
10200
10201 // If the value of the Mask is not 0, we have a constraint in the size of
10202 // the integer argument so here we ensure the argument is a constant that
10203 // is in the valid range.
10204 if (Mask != 0 &&
10205 SemaBuiltinConstantArgRange(TheCall, ArgNum, Low: 0, High: Mask, RangeIsError: true))
10206 return true;
10207
10208 ArgNum++;
10209 }
10210
10211 // If we exited the previous loop early, there are still types left to read
10212 // from TypeStr. Read them all so that we know how many arguments TheCall is
10213 // expected to have and can emit a better error message if the count does not
10214 // match.
10215 while (*TypeStr != '\0') {
10216 (void) DecodePPCMMATypeFromStr(Context, Str&: TypeStr, Mask);
10217 ArgNum++;
10218 }
10219 if (checkArgCount(S&: *this, Call: TheCall, DesiredArgCount: ArgNum))
10220 return true;
10221
10222 return false;
10223}
10224
10225/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
10226/// This checks that the target supports __builtin_longjmp and
10227/// that val is a constant 1.
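///
/// Illustrative only (hypothetical user code, not part of this file):
///   __builtin_longjmp(env, 1);   // accepted where the target supports it
///   __builtin_longjmp(env, 2);   // rejected: val must be the constant 1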
10228bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
10229 if (!Context.getTargetInfo().hasSjLjLowering())
10230 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_unsupported)
10231 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10232
10233 Expr *Arg = TheCall->getArg(Arg: 1);
10234 llvm::APSInt Result;
10235
10236 // TODO: This is less than ideal. Overload this to take a value.
10237 if (SemaBuiltinConstantArg(TheCall, ArgNum: 1, Result))
10238 return true;
10239
10240 if (Result != 1)
10241 return Diag(TheCall->getBeginLoc(), diag::err_builtin_longjmp_invalid_val)
10242 << SourceRange(Arg->getBeginLoc(), Arg->getEndLoc());
10243
10244 return false;
10245}
10246
10247/// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]).
10248/// This checks that the target supports __builtin_setjmp.
10249bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) {
10250 if (!Context.getTargetInfo().hasSjLjLowering())
10251 return Diag(TheCall->getBeginLoc(), diag::err_builtin_setjmp_unsupported)
10252 << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
10253 return false;
10254}
10255
10256namespace {
10257
10258class UncoveredArgHandler {
10259 enum { Unknown = -1, AllCovered = -2 };
10260
10261 signed FirstUncoveredArg = Unknown;
10262 SmallVector<const Expr *, 4> DiagnosticExprs;
10263
10264public:
10265 UncoveredArgHandler() = default;
10266
10267 bool hasUncoveredArg() const {
10268 return (FirstUncoveredArg >= 0);
10269 }
10270
10271 unsigned getUncoveredArg() const {
10272 assert(hasUncoveredArg() && "no uncovered argument");
10273 return FirstUncoveredArg;
10274 }
10275
10276 void setAllCovered() {
10277 // A string has been found with all arguments covered, so clear out
10278 // the diagnostics.
10279 DiagnosticExprs.clear();
10280 FirstUncoveredArg = AllCovered;
10281 }
10282
10283 void Update(signed NewFirstUncoveredArg, const Expr *StrExpr) {
10284 assert(NewFirstUncoveredArg >= 0 && "Outside range");
10285
10286 // Don't update if a previous string covers all arguments.
10287 if (FirstUncoveredArg == AllCovered)
10288 return;
10289
10290 // UncoveredArgHandler tracks the highest uncovered argument index
10291 // and with it all the strings that match this index.
10292 if (NewFirstUncoveredArg == FirstUncoveredArg)
10293 DiagnosticExprs.push_back(Elt: StrExpr);
10294 else if (NewFirstUncoveredArg > FirstUncoveredArg) {
10295 DiagnosticExprs.clear();
10296 DiagnosticExprs.push_back(Elt: StrExpr);
10297 FirstUncoveredArg = NewFirstUncoveredArg;
10298 }
10299 }
10300
10301 void Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr);
10302};
10303
10304enum StringLiteralCheckType {
10305 SLCT_NotALiteral,
10306 SLCT_UncheckedLiteral,
10307 SLCT_CheckedLiteral
10308};
10309
10310} // namespace
10311
10312static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
10313 BinaryOperatorKind BinOpKind,
10314 bool AddendIsRight) {
10315 unsigned BitWidth = Offset.getBitWidth();
10316 unsigned AddendBitWidth = Addend.getBitWidth();
10317 // There might be negative interim results.
10318 if (Addend.isUnsigned()) {
10319 Addend = Addend.zext(width: ++AddendBitWidth);
10320 Addend.setIsSigned(true);
10321 }
10322 // Adjust the bit width of the APSInts.
10323 if (AddendBitWidth > BitWidth) {
10324 Offset = Offset.sext(width: AddendBitWidth);
10325 BitWidth = AddendBitWidth;
10326 } else if (BitWidth > AddendBitWidth) {
10327 Addend = Addend.sext(width: BitWidth);
10328 }
10329
10330 bool Ov = false;
10331 llvm::APSInt ResOffset = Offset;
10332 if (BinOpKind == BO_Add)
10333 ResOffset = Offset.sadd_ov(RHS: Addend, Overflow&: Ov);
10334 else {
10335 assert(AddendIsRight && BinOpKind == BO_Sub &&
10336 "operator must be add or sub with addend on the right");
10337 ResOffset = Offset.ssub_ov(RHS: Addend, Overflow&: Ov);
10338 }
10339
10340 // We add an offset to a pointer here so we should support an offset as big as
10341 // possible.
10342 if (Ov) {
10343 assert(BitWidth <= std::numeric_limits<unsigned>::max() / 2 &&
10344 "index (intermediate) result too big");
10345 Offset = Offset.sext(width: 2 * BitWidth);
10346 sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
10347 return;
10348 }
10349
10350 Offset = ResOffset;
10351}
10352
10353namespace {
10354
10355// This is a wrapper class around StringLiteral to support offset string
10356// literals as format strings. The offset is taken into account when returning
10357// the string, its length, and the source locations used to display notes.
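//
// Illustrative only (hypothetical user code, not part of this file): for
//   printf("%s: %d" + 4, n);
// the literal is analyzed starting at offset 4, i.e. as the format string "%d".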
10358class FormatStringLiteral {
10359 const StringLiteral *FExpr;
10360 int64_t Offset;
10361
10362 public:
10363 FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
10364 : FExpr(fexpr), Offset(Offset) {}
10365
10366 StringRef getString() const {
10367 return FExpr->getString().drop_front(N: Offset);
10368 }
10369
10370 unsigned getByteLength() const {
10371 return FExpr->getByteLength() - getCharByteWidth() * Offset;
10372 }
10373
10374 unsigned getLength() const { return FExpr->getLength() - Offset; }
10375 unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
10376
10377 StringLiteralKind getKind() const { return FExpr->getKind(); }
10378
10379 QualType getType() const { return FExpr->getType(); }
10380
10381 bool isAscii() const { return FExpr->isOrdinary(); }
10382 bool isWide() const { return FExpr->isWide(); }
10383 bool isUTF8() const { return FExpr->isUTF8(); }
10384 bool isUTF16() const { return FExpr->isUTF16(); }
10385 bool isUTF32() const { return FExpr->isUTF32(); }
10386 bool isPascal() const { return FExpr->isPascal(); }
10387
10388 SourceLocation getLocationOfByte(
10389 unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
10390 const TargetInfo &Target, unsigned *StartToken = nullptr,
10391 unsigned *StartTokenByteOffset = nullptr) const {
10392 return FExpr->getLocationOfByte(ByteNo: ByteNo + Offset, SM, Features, Target,
10393 StartToken, StartTokenByteOffset);
10394 }
10395
10396 SourceLocation getBeginLoc() const LLVM_READONLY {
10397 return FExpr->getBeginLoc().getLocWithOffset(Offset);
10398 }
10399
10400 SourceLocation getEndLoc() const LLVM_READONLY { return FExpr->getEndLoc(); }
10401};
10402
10403} // namespace
10404
10405static void CheckFormatString(
10406 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
10407 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
10408 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
10409 bool inFunctionCall, Sema::VariadicCallType CallType,
10410 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
10411 bool IgnoreStringsWithoutSpecifiers);
10412
10413static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
10414 const Expr *E);
10415
10416// Determine if an expression is a string literal or constant string.
10417// If this function returns SLCT_NotALiteral for the argument of a function
10418// format string, we will usually need to emit a warning.
10419// True string literals are then checked by CheckFormatString.
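//
// Illustrative only (hypothetical user code, not part of this file):
//   printf(n == 1 ? "an egg\n" : "%d eggs\n", n);  // both arms are checked
//   printf(fmt, n);   // SLCT_NotALiteral unless fmt can be traced to a literal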
10420static StringLiteralCheckType
10421checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
10422 Sema::FormatArgumentPassingKind APK, unsigned format_idx,
10423 unsigned firstDataArg, Sema::FormatStringType Type,
10424 Sema::VariadicCallType CallType, bool InFunctionCall,
10425 llvm::SmallBitVector &CheckedVarArgs,
10426 UncoveredArgHandler &UncoveredArg, llvm::APSInt Offset,
10427 bool IgnoreStringsWithoutSpecifiers = false) {
10428 if (S.isConstantEvaluatedContext())
10429 return SLCT_NotALiteral;
10430tryAgain:
10431 assert(Offset.isSigned() && "invalid offset");
10432
10433 if (E->isTypeDependent() || E->isValueDependent())
10434 return SLCT_NotALiteral;
10435
10436 E = E->IgnoreParenCasts();
10437
10438 if (E->isNullPointerConstant(Ctx&: S.Context, NPC: Expr::NPC_ValueDependentIsNotNull))
10439 // Technically -Wformat-nonliteral does not warn about this case.
10440 // The behavior of printf and friends in this case is implementation
10441 // dependent. Ideally if the format string cannot be null then
10442 // it should have a 'nonnull' attribute in the function prototype.
10443 return SLCT_UncheckedLiteral;
10444
10445 switch (E->getStmtClass()) {
10446 case Stmt::InitListExprClass:
10447 // Handle expressions like {"foobar"}.
10448 if (const clang::Expr *SLE = maybeConstEvalStringLiteral(Context&: S.Context, E)) {
10449 return checkFormatStringExpr(S, E: SLE, Args, APK, format_idx, firstDataArg,
10450 Type, CallType, /*InFunctionCall*/ false,
10451 CheckedVarArgs, UncoveredArg, Offset,
10452 IgnoreStringsWithoutSpecifiers);
10453 }
10454 return SLCT_NotALiteral;
10455 case Stmt::BinaryConditionalOperatorClass:
10456 case Stmt::ConditionalOperatorClass: {
10457 // The expression is a literal if both sub-expressions were, and it was
10458 // completely checked only if both sub-expressions were checked.
10459 const AbstractConditionalOperator *C =
10460 cast<AbstractConditionalOperator>(Val: E);
10461
10462 // Determine whether it is necessary to check both sub-expressions, for
10463 // example, because the condition expression is a constant that can be
10464 // evaluated at compile time.
10465 bool CheckLeft = true, CheckRight = true;
10466
10467 bool Cond;
10468 if (C->getCond()->EvaluateAsBooleanCondition(
10469 Result&: Cond, Ctx: S.getASTContext(), InConstantContext: S.isConstantEvaluatedContext())) {
10470 if (Cond)
10471 CheckRight = false;
10472 else
10473 CheckLeft = false;
10474 }
10475
10476 // We need to maintain the offsets for the right and the left hand side
10477 // separately to check if every possible indexed expression is a valid
10478 // string literal. They might have different offsets for different string
10479 // literals in the end.
10480 StringLiteralCheckType Left;
10481 if (!CheckLeft)
10482 Left = SLCT_UncheckedLiteral;
10483 else {
10484 Left = checkFormatStringExpr(S, E: C->getTrueExpr(), Args, APK, format_idx,
10485 firstDataArg, Type, CallType, InFunctionCall,
10486 CheckedVarArgs, UncoveredArg, Offset,
10487 IgnoreStringsWithoutSpecifiers);
10488 if (Left == SLCT_NotALiteral || !CheckRight) {
10489 return Left;
10490 }
10491 }
10492
10493 StringLiteralCheckType Right = checkFormatStringExpr(
10494 S, E: C->getFalseExpr(), Args, APK, format_idx, firstDataArg, Type,
10495 CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10496 IgnoreStringsWithoutSpecifiers);
10497
10498 return (CheckLeft && Left < Right) ? Left : Right;
10499 }
10500
10501 case Stmt::ImplicitCastExprClass:
10502 E = cast<ImplicitCastExpr>(Val: E)->getSubExpr();
10503 goto tryAgain;
10504
10505 case Stmt::OpaqueValueExprClass:
10506 if (const Expr *src = cast<OpaqueValueExpr>(Val: E)->getSourceExpr()) {
10507 E = src;
10508 goto tryAgain;
10509 }
10510 return SLCT_NotALiteral;
10511
10512 case Stmt::PredefinedExprClass:
10513 // While __func__, etc., are technically not string literals, they
10514 // cannot contain format specifiers and thus are not a security
10515 // liability.
10516 return SLCT_UncheckedLiteral;
10517
10518 case Stmt::DeclRefExprClass: {
10519 const DeclRefExpr *DR = cast<DeclRefExpr>(Val: E);
10520
10521 // As an exception, do not flag errors for variables binding to
10522 // const string literals.
10523 if (const VarDecl *VD = dyn_cast<VarDecl>(Val: DR->getDecl())) {
10524 bool isConstant = false;
10525 QualType T = DR->getType();
10526
10527 if (const ArrayType *AT = S.Context.getAsArrayType(T)) {
10528 isConstant = AT->getElementType().isConstant(Ctx: S.Context);
10529 } else if (const PointerType *PT = T->getAs<PointerType>()) {
10530 isConstant = T.isConstant(Ctx: S.Context) &&
10531 PT->getPointeeType().isConstant(Ctx: S.Context);
10532 } else if (T->isObjCObjectPointerType()) {
10533 // In ObjC, there is usually no "const ObjectPointer" type,
10534 // so don't check if the pointee type is constant.
10535 isConstant = T.isConstant(Ctx: S.Context);
10536 }
10537
10538 if (isConstant) {
10539 if (const Expr *Init = VD->getAnyInitializer()) {
10540 // Look through initializers like const char c[] = { "foo" }
10541 if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Val: Init)) {
10542 if (InitList->isStringLiteralInit())
10543 Init = InitList->getInit(Init: 0)->IgnoreParenImpCasts();
10544 }
10545 return checkFormatStringExpr(
10546 S, E: Init, Args, APK, format_idx, firstDataArg, Type, CallType,
10547 /*InFunctionCall*/ false, CheckedVarArgs, UncoveredArg, Offset);
10548 }
10549 }
10550
10551 // When the format argument is an argument of this function, and this
10552 // function also has the format attribute, there are several interactions
10553 // for which there shouldn't be a warning. For instance, when calling
10554 // v*printf from a function that has the printf format attribute, we
10555 // should not emit a warning about using `fmt`, even though it's not
10556 // constant, because the arguments have already been checked in the
10557 // caller of `logmessage`:
10558 //
10559 // __attribute__((format(printf, 1, 2)))
10560 // void logmessage(char const *fmt, ...) {
10561 // va_list ap;
10562 // va_start(ap, fmt);
10563 // vprintf(fmt, ap); /* do not emit a warning about "fmt" */
10564 // ...
10565 // }
10566 //
10567 // Another interaction that we need to support is calling a variadic
10568 // format function from a format function that has fixed arguments. For
10569 // instance:
10570 //
10571 // __attribute__((format(printf, 1, 2)))
10572 // void logstring(char const *fmt, char const *str) {
10573 // printf(fmt, str); /* do not emit a warning about "fmt" */
10574 // }
10575 //
10576 // Same (and perhaps more relatably) for the variadic template case:
10577 //
10578 // template<typename... Args>
10579 // __attribute__((format(printf, 1, 2)))
10580 // void log(const char *fmt, Args&&... args) {
10581 // printf(fmt, forward<Args>(args)...);
10582 // /* do not emit a warning about "fmt" */
10583 // }
10584 //
10585 // Due to implementation difficulty, we only check the format, not the
10586 // format arguments, in all cases.
10587 //
10588 if (const auto *PV = dyn_cast<ParmVarDecl>(Val: VD)) {
10589 if (const auto *D = dyn_cast<Decl>(PV->getDeclContext())) {
10590 for (const auto *PVFormat : D->specific_attrs<FormatAttr>()) {
10591 bool IsCXXMember = false;
10592 if (const auto *MD = dyn_cast<CXXMethodDecl>(D))
10593 IsCXXMember = MD->isInstance();
10594
10595 bool IsVariadic = false;
10596 if (const FunctionType *FnTy = D->getFunctionType())
10597 IsVariadic = cast<FunctionProtoType>(FnTy)->isVariadic();
10598 else if (const auto *BD = dyn_cast<BlockDecl>(D))
10599 IsVariadic = BD->isVariadic();
10600 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(D))
10601 IsVariadic = OMD->isVariadic();
10602
10603 Sema::FormatStringInfo CallerFSI;
10604 if (Sema::getFormatStringInfo(PVFormat, IsCXXMember, IsVariadic,
10605 &CallerFSI)) {
10606 // We also check if the formats are compatible.
10607 // We can't pass a 'scanf' string to a 'printf' function.
10608 if (PV->getFunctionScopeIndex() == CallerFSI.FormatIdx &&
10609 Type == S.GetFormatStringType(PVFormat)) {
10610 // Lastly, check that argument passing kinds transition in a
10611 // way that makes sense:
10612 // from a caller with FAPK_VAList, allow FAPK_VAList
10613 // from a caller with FAPK_Fixed, allow FAPK_Fixed
10614 // from a caller with FAPK_Fixed, allow FAPK_Variadic
10615 // from a caller with FAPK_Variadic, allow FAPK_VAList
10616 switch (combineFAPK(CallerFSI.ArgPassingKind, APK)) {
10617 case combineFAPK(Sema::FAPK_VAList, Sema::FAPK_VAList):
10618 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Fixed):
10619 case combineFAPK(Sema::FAPK_Fixed, Sema::FAPK_Variadic):
10620 case combineFAPK(Sema::FAPK_Variadic, Sema::FAPK_VAList):
10621 return SLCT_UncheckedLiteral;
10622 }
10623 }
10624 }
10625 }
10626 }
10627 }
10628 }
10629
10630 return SLCT_NotALiteral;
10631 }
10632
10633 case Stmt::CallExprClass:
10634 case Stmt::CXXMemberCallExprClass: {
10635 const CallExpr *CE = cast<CallExpr>(Val: E);
10636 if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Val: CE->getCalleeDecl())) {
10637 bool IsFirst = true;
10638 StringLiteralCheckType CommonResult;
10639 for (const auto *FA : ND->specific_attrs<FormatArgAttr>()) {
10640 const Expr *Arg = CE->getArg(FA->getFormatIdx().getASTIndex());
10641 StringLiteralCheckType Result = checkFormatStringExpr(
10642 S, Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10643 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10644 IgnoreStringsWithoutSpecifiers);
10645 if (IsFirst) {
10646 CommonResult = Result;
10647 IsFirst = false;
10648 }
10649 }
10650 if (!IsFirst)
10651 return CommonResult;
10652
10653 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
10654 unsigned BuiltinID = FD->getBuiltinID();
10655 if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
10656 BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) {
10657 const Expr *Arg = CE->getArg(Arg: 0);
10658 return checkFormatStringExpr(
10659 S, E: Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10660 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10661 IgnoreStringsWithoutSpecifiers);
10662 }
10663 }
10664 }
10665 if (const Expr *SLE = maybeConstEvalStringLiteral(Context&: S.Context, E))
10666 return checkFormatStringExpr(S, E: SLE, Args, APK, format_idx, firstDataArg,
10667 Type, CallType, /*InFunctionCall*/ false,
10668 CheckedVarArgs, UncoveredArg, Offset,
10669 IgnoreStringsWithoutSpecifiers);
10670 return SLCT_NotALiteral;
10671 }
10672 case Stmt::ObjCMessageExprClass: {
10673 const auto *ME = cast<ObjCMessageExpr>(Val: E);
10674 if (const auto *MD = ME->getMethodDecl()) {
10675 if (const auto *FA = MD->getAttr<FormatArgAttr>()) {
10676 // As a special case heuristic, if we're using the method -[NSBundle
10677 // localizedStringForKey:value:table:], ignore any key strings that lack
10678 // format specifiers. The idea is that if the key doesn't have any
10679 // format specifiers then it's probably just a key to map to the
10680 // localized strings. If it does have format specifiers though, then it's
10681 // likely that the text of the key is the format string in the
10682 // programmer's language, and should be checked.
10683 const ObjCInterfaceDecl *IFace;
10684 if (MD->isInstanceMethod() && (IFace = MD->getClassInterface()) &&
10685 IFace->getIdentifier()->isStr("NSBundle") &&
10686 MD->getSelector().isKeywordSelector(
10687 Names: {"localizedStringForKey", "value", "table"})) {
10688 IgnoreStringsWithoutSpecifiers = true;
10689 }
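// Illustrative example (assumption): with this heuristic, a key such as
// @"WelcomeMessage" is skipped, whereas @"Found %d items" still looks like
// a format string in the programmer's language and gets checked.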
10690
10691 const Expr *Arg = ME->getArg(FA->getFormatIdx().getASTIndex());
10692 return checkFormatStringExpr(
10693 S, E: Arg, Args, APK, format_idx, firstDataArg, Type, CallType,
10694 InFunctionCall, CheckedVarArgs, UncoveredArg, Offset,
10695 IgnoreStringsWithoutSpecifiers);
10696 }
10697 }
10698
10699 return SLCT_NotALiteral;
10700 }
10701 case Stmt::ObjCStringLiteralClass:
10702 case Stmt::StringLiteralClass: {
10703 const StringLiteral *StrE = nullptr;
10704
10705 if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(Val: E))
10706 StrE = ObjCFExpr->getString();
10707 else
10708 StrE = cast<StringLiteral>(Val: E);
10709
10710 if (StrE) {
10711 if (Offset.isNegative() || Offset > StrE->getLength()) {
10712 // TODO: It would be better to have an explicit warning for out-of-bounds
10713 // literals.
10714 return SLCT_NotALiteral;
10715 }
10716 FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(width: 64).getSExtValue());
10717 CheckFormatString(S, FExpr: &FStr, OrigFormatExpr: E, Args, APK, format_idx, firstDataArg, Type,
10718 inFunctionCall: InFunctionCall, CallType, CheckedVarArgs, UncoveredArg,
10719 IgnoreStringsWithoutSpecifiers);
10720 return SLCT_CheckedLiteral;
10721 }
10722
10723 return SLCT_NotALiteral;
10724 }
10725 case Stmt::BinaryOperatorClass: {
10726 const BinaryOperator *BinOp = cast<BinaryOperator>(Val: E);
10727
10728 // A string literal + an int offset is still a string literal.
10729 if (BinOp->isAdditiveOp()) {
10730 Expr::EvalResult LResult, RResult;
10731
10732 bool LIsInt = BinOp->getLHS()->EvaluateAsInt(
10733 Result&: LResult, Ctx: S.Context, AllowSideEffects: Expr::SE_NoSideEffects,
10734 InConstantContext: S.isConstantEvaluatedContext());
10735 bool RIsInt = BinOp->getRHS()->EvaluateAsInt(
10736 Result&: RResult, Ctx: S.Context, AllowSideEffects: Expr::SE_NoSideEffects,
10737 InConstantContext: S.isConstantEvaluatedContext());
10738
10739 if (LIsInt != RIsInt) {
10740 BinaryOperatorKind BinOpKind = BinOp->getOpcode();
10741
10742 if (LIsInt) {
10743 if (BinOpKind == BO_Add) {
10744 sumOffsets(Offset, Addend: LResult.Val.getInt(), BinOpKind, AddendIsRight: RIsInt);
10745 E = BinOp->getRHS();
10746 goto tryAgain;
10747 }
10748 } else {
10749 sumOffsets(Offset, Addend: RResult.Val.getInt(), BinOpKind, AddendIsRight: RIsInt);
10750 E = BinOp->getLHS();
10751 goto tryAgain;
10752 }
10753 }
10754 }
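// Illustrative example: for printf("silly %d string" + 6, 42), the constant
// offset 6 is accumulated into Offset and checking resumes on the "%d string"
// suffix; negative or out-of-range offsets are caught in the StringLiteral
// case.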
10755
10756 return SLCT_NotALiteral;
10757 }
10758 case Stmt::UnaryOperatorClass: {
10759 const UnaryOperator *UnaOp = cast<UnaryOperator>(Val: E);
10760 auto ASE = dyn_cast<ArraySubscriptExpr>(Val: UnaOp->getSubExpr());
10761 if (UnaOp->getOpcode() == UO_AddrOf && ASE) {
10762 Expr::EvalResult IndexResult;
10763 if (ASE->getRHS()->EvaluateAsInt(Result&: IndexResult, Ctx: S.Context,
10764 AllowSideEffects: Expr::SE_NoSideEffects,
10765 InConstantContext: S.isConstantEvaluatedContext())) {
10766 sumOffsets(Offset, Addend: IndexResult.Val.getInt(), BinOpKind: BO_Add,
10767 /*RHS is int*/ AddendIsRight: true);
10768 E = ASE->getBase();
10769 goto tryAgain;
10770 }
10771 }
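// Illustrative example: printf(&"ab%d"[2], x) takes the address of an
// element of the literal; the constant index 2 is folded into Offset and the
// base expression ("ab%d") is re-examined as the format string.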
10772
10773 return SLCT_NotALiteral;
10774 }
10775
10776 default:
10777 return SLCT_NotALiteral;
10778 }
10779}
10780
10781 // If this expression can be evaluated at compile time,
10782 // check if the result is a StringLiteral and return it;
10783 // otherwise return nullptr.
10784static const Expr *maybeConstEvalStringLiteral(ASTContext &Context,
10785 const Expr *E) {
10786 Expr::EvalResult Result;
10787 if (E->EvaluateAsRValue(Result, Ctx: Context) && Result.Val.isLValue()) {
10788 const auto *LVE = Result.Val.getLValueBase().dyn_cast<const Expr *>();
10789 if (isa_and_nonnull<StringLiteral>(Val: LVE))
10790 return LVE;
10791 }
10792 return nullptr;
10793}
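// Illustrative example (pickFormat is a hypothetical helper): given
//   constexpr const char *pickFormat(bool b) { return b ? "%d" : "%u"; }
//   printf(pickFormat(true), -1);
// the call constant-evaluates to an lvalue based on the "%d" literal, so
// that literal is returned and checked as usual.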
10794
10795Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
10796 return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
10797 .Case(S: "scanf", Value: FST_Scanf)
10798 .Cases(S0: "printf", S1: "printf0", Value: FST_Printf)
10799 .Cases(S0: "NSString", S1: "CFString", Value: FST_NSString)
10800 .Case(S: "strftime", Value: FST_Strftime)
10801 .Case(S: "strfmon", Value: FST_Strfmon)
10802 .Cases(S0: "kprintf", S1: "cmn_err", S2: "vcmn_err", S3: "zcmn_err", Value: FST_Kprintf)
10803 .Case(S: "freebsd_kprintf", Value: FST_FreeBSDKPrintf)
10804 .Case(S: "os_trace", Value: FST_OSLog)
10805 .Case(S: "os_log", Value: FST_OSLog)
10806 .Default(Value: FST_Unknown);
10807}
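// For example (illustrative): an attribute written as
// __attribute__((format(strfmon, 1, 2))) yields FST_Strfmon here, while an
// unrecognized archetype name falls through to FST_Unknown.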
10808
10809/// CheckFormatArguments - Check calls to printf and scanf (and similar
10810/// functions) for correct use of format strings.
10811/// Returns true if a format string has been fully checked.
10812bool Sema::CheckFormatArguments(const FormatAttr *Format,
10813 ArrayRef<const Expr *> Args, bool IsCXXMember,
10814 VariadicCallType CallType, SourceLocation Loc,
10815 SourceRange Range,
10816 llvm::SmallBitVector &CheckedVarArgs) {
10817 FormatStringInfo FSI;
10818 if (getFormatStringInfo(Format, IsCXXMember, IsVariadic: CallType != VariadicDoesNotApply,
10819 FSI: &FSI))
10820 return CheckFormatArguments(Args, FAPK: FSI.ArgPassingKind, format_idx: FSI.FormatIdx,
10821 firstDataArg: FSI.FirstDataArg, Type: GetFormatStringType(Format),
10822 CallType, Loc, range: Range, CheckedVarArgs);
10823 return false;
10824}
10825
10826bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
10827 Sema::FormatArgumentPassingKind APK,
10828 unsigned format_idx, unsigned firstDataArg,
10829 FormatStringType Type,
10830 VariadicCallType CallType, SourceLocation Loc,
10831 SourceRange Range,
10832 llvm::SmallBitVector &CheckedVarArgs) {
10833 // CHECK: printf/scanf-like function is called with no format string.
10834 if (format_idx >= Args.size()) {
10835 Diag(Loc, diag::warn_missing_format_string) << Range;
10836 return false;
10837 }
10838
10839 const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
10840
10841 // CHECK: format string is not a string literal.
10842 //
10843 // Dynamically generated format strings are difficult to
10844 // automatically vet at compile time. Requiring that format strings
10845 // are string literals: (1) permits the checking of format strings by
10846 // the compiler and thereby (2) can practically remove the source of
10847 // many format string exploits.
10848
10849 // The format string can be either an ObjC string (e.g. @"%d") or a
10850 // C string (e.g. "%d").
10851 // ObjC strings use the same format specifiers as C strings, so we can use
10852 // the same format string checking logic for both ObjC and C strings.
10853 UncoveredArgHandler UncoveredArg;
10854 StringLiteralCheckType CT = checkFormatStringExpr(
10855 S&: *this, E: OrigFormatExpr, Args, APK, format_idx, firstDataArg, Type,
10856 CallType,
10857 /*IsFunctionCall*/ InFunctionCall: true, CheckedVarArgs, UncoveredArg,
10858 /*no string offset*/ Offset: llvm::APSInt(64, false) = 0);
10859
10860 // Generate a diagnostic where an uncovered argument is detected.
10861 if (UncoveredArg.hasUncoveredArg()) {
10862 unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg;
10863 assert(ArgIdx < Args.size() && "ArgIdx outside bounds");
10864 UncoveredArg.Diagnose(S&: *this, /*IsFunctionCall*/true, ArgExpr: Args[ArgIdx]);
10865 }
10866
10867 if (CT != SLCT_NotALiteral)
10868 // Literal format string found, check done!
10869 return CT == SLCT_CheckedLiteral;
10870
10871 // Strftime is a special case, as it always uses a single 'time' argument,
10872 // so it is safe to pass a non-literal format string.
10873 if (Type == FST_Strftime)
10874 return false;
10875
10876 // Do not emit a diagnostic when the string param is a macro expansion and
10877 // the format is either NSString or CFString. This is a hack to prevent
10878 // diagnostics when using the NSLocalizedString and CFCopyLocalizedString
10879 // macros, which are usually used in place of NS and CF string literals.
10880 SourceLocation FormatLoc = Args[format_idx]->getBeginLoc();
10881 if (Type == FST_NSString && SourceMgr.isInSystemMacro(loc: FormatLoc))
10882 return false;
10883
10884 // If there are no data arguments specified, warn with -Wformat-security;
10885 // otherwise warn only with -Wformat-nonliteral.
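// Illustrative example (not from the original source):
//   printf(fmt);      /* no data args: warned, with a "%s", fixit */
//   printf(fmt, val); /* has data args: warned under -Wformat-nonliteral */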
10886 if (Args.size() == firstDataArg) {
10887 Diag(FormatLoc, diag::warn_format_nonliteral_noargs)
10888 << OrigFormatExpr->getSourceRange();
10889 switch (Type) {
10890 default:
10891 break;
10892 case FST_Kprintf:
10893 case FST_FreeBSDKPrintf:
10894 case FST_Printf:
10895 Diag(FormatLoc, diag::note_format_security_fixit)
10896 << FixItHint::CreateInsertion(FormatLoc, "\"%s\", ");
10897 break;
10898 case FST_NSString:
10899 Diag(FormatLoc, diag::note_format_security_fixit)
10900 << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", ");
10901 break;
10902 }
10903 } else {
10904 Diag(FormatLoc, diag::warn_format_nonliteral)
10905 << OrigFormatExpr->getSourceRange();
10906 }
10907 return false;
10908}
10909
10910namespace {
10911
10912class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
10913protected:
10914 Sema &S;
10915 const FormatStringLiteral *FExpr;
10916 const Expr *OrigFormatExpr;
10917 const Sema::FormatStringType FSType;
10918 const unsigned FirstDataArg;
10919 const unsigned NumDataArgs;
10920 const char *Beg; // Start of format string.
10921 const Sema::FormatArgumentPassingKind ArgPassingKind;
10922 ArrayRef<const Expr *> Args;
10923 unsigned FormatIdx;
10924 llvm::SmallBitVector CoveredArgs;
10925 bool usesPositionalArgs = false;
10926 bool atFirstArg = true;
10927 bool inFunctionCall;
10928 Sema::VariadicCallType CallType;
10929 llvm::SmallBitVector &CheckedVarArgs;
10930 UncoveredArgHandler &UncoveredArg;
10931
10932public:
10933 CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
10934 const Expr *origFormatExpr,
10935 const Sema::FormatStringType type, unsigned firstDataArg,
10936 unsigned numDataArgs, const char *beg,
10937 Sema::FormatArgumentPassingKind APK,
10938 ArrayRef<const Expr *> Args, unsigned formatIdx,
10939 bool inFunctionCall, Sema::VariadicCallType callType,
10940 llvm::SmallBitVector &CheckedVarArgs,
10941 UncoveredArgHandler &UncoveredArg)
10942 : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
10943 FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
10944 ArgPassingKind(APK), Args(Args), FormatIdx(formatIdx),
10945 inFunctionCall(inFunctionCall), CallType(callType),
10946 CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
10947 CoveredArgs.resize(N: numDataArgs);
10948 CoveredArgs.reset();
10949 }
10950
10951 void DoneProcessing();
10952
10953 void HandleIncompleteSpecifier(const char *startSpecifier,
10954 unsigned specifierLen) override;
10955
10956 void HandleInvalidLengthModifier(
10957 const analyze_format_string::FormatSpecifier &FS,
10958 const analyze_format_string::ConversionSpecifier &CS,
10959 const char *startSpecifier, unsigned specifierLen,
10960 unsigned DiagID);
10961
10962 void HandleNonStandardLengthModifier(
10963 const analyze_format_string::FormatSpecifier &FS,
10964 const char *startSpecifier, unsigned specifierLen);
10965
10966 void HandleNonStandardConversionSpecifier(
10967 const analyze_format_string::ConversionSpecifier &CS,
10968 const char *startSpecifier, unsigned specifierLen);
10969
10970 void HandlePosition(const char *startPos, unsigned posLen) override;
10971
10972 void HandleInvalidPosition(const char *startSpecifier,
10973 unsigned specifierLen,
10974 analyze_format_string::PositionContext p) override;
10975
10976 void HandleZeroPosition(const char *startPos, unsigned posLen) override;
10977
10978 void HandleNullChar(const char *nullCharacter) override;
10979
10980 template <typename Range>
10981 static void
10982 EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr,
10983 const PartialDiagnostic &PDiag, SourceLocation StringLoc,
10984 bool IsStringLocation, Range StringRange,
10985 ArrayRef<FixItHint> Fixit = std::nullopt);
10986
10987protected:
10988 bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
10989 const char *startSpec,
10990 unsigned specifierLen,
10991 const char *csStart, unsigned csLen);
10992
10993 void HandlePositionalNonpositionalArgs(SourceLocation Loc,
10994 const char *startSpec,
10995 unsigned specifierLen);
10996
10997 SourceRange getFormatStringRange();
10998 CharSourceRange getSpecifierRange(const char *startSpecifier,
10999 unsigned specifierLen);
11000 SourceLocation getLocationOfByte(const char *x);
11001
11002 const Expr *getDataArg(unsigned i) const;
11003
11004 bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
11005 const analyze_format_string::ConversionSpecifier &CS,
11006 const char *startSpecifier, unsigned specifierLen,
11007 unsigned argIndex);
11008
11009 template <typename Range>
11010 void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
11011 bool IsStringLocation, Range StringRange,
11012 ArrayRef<FixItHint> Fixit = std::nullopt);
11013};
11014
11015} // namespace
11016
11017SourceRange CheckFormatHandler::getFormatStringRange() {
11018 return OrigFormatExpr->getSourceRange();
11019}
11020
11021CharSourceRange CheckFormatHandler::
11022getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
11023 SourceLocation Start = getLocationOfByte(x: startSpecifier);
11024 SourceLocation End = getLocationOfByte(x: startSpecifier + specifierLen - 1);
11025
11026 // Advance the end SourceLocation by one due to half-open ranges.
11027 End = End.getLocWithOffset(Offset: 1);
11028
11029 return CharSourceRange::getCharRange(B: Start, E: End);
11030}
11031
11032SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
11033 return FExpr->getLocationOfByte(ByteNo: x - Beg, SM: S.getSourceManager(),
11034 Features: S.getLangOpts(), Target: S.Context.getTargetInfo());
11035}
11036
11037void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
11038 unsigned specifierLen){
11039 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
11040 getLocationOfByte(startSpecifier),
11041 /*IsStringLocation*/true,
11042 getSpecifierRange(startSpecifier, specifierLen));
11043}
11044
11045void CheckFormatHandler::HandleInvalidLengthModifier(
11046 const analyze_format_string::FormatSpecifier &FS,
11047 const analyze_format_string::ConversionSpecifier &CS,
11048 const char *startSpecifier, unsigned specifierLen, unsigned DiagID) {
11049 using namespace analyze_format_string;
11050
11051 const LengthModifier &LM = FS.getLengthModifier();
11052 CharSourceRange LMRange = getSpecifierRange(startSpecifier: LM.getStart(), specifierLen: LM.getLength());
11053
11054 // See if we know how to fix this length modifier.
11055 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
11056 if (FixedLM) {
11057 EmitFormatDiagnostic(PDiag: S.PDiag(DiagID) << LM.toString() << CS.toString(),
11058 Loc: getLocationOfByte(x: LM.getStart()),
11059 /*IsStringLocation*/true,
11060 StringRange: getSpecifierRange(startSpecifier, specifierLen));
11061
11062 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
11063 << FixedLM->toString()
11064 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
11065
11066 } else {
11067 FixItHint Hint;
11068 if (DiagID == diag::warn_format_nonsensical_length)
11069 Hint = FixItHint::CreateRemoval(RemoveRange: LMRange);
11070
11071 EmitFormatDiagnostic(PDiag: S.PDiag(DiagID) << LM.toString() << CS.toString(),
11072 Loc: getLocationOfByte(x: LM.getStart()),
11073 /*IsStringLocation*/true,
11074 StringRange: getSpecifierRange(startSpecifier, specifierLen),
11075 FixIt: Hint);
11076 }
11077}
11078
11079void CheckFormatHandler::HandleNonStandardLengthModifier(
11080 const analyze_format_string::FormatSpecifier &FS,
11081 const char *startSpecifier, unsigned specifierLen) {
11082 using namespace analyze_format_string;
11083
11084 const LengthModifier &LM = FS.getLengthModifier();
11085 CharSourceRange LMRange = getSpecifierRange(startSpecifier: LM.getStart(), specifierLen: LM.getLength());
11086
11087 // See if we know how to fix this length modifier.
11088 std::optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier();
11089 if (FixedLM) {
11090 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11091 << LM.toString() << 0,
11092 getLocationOfByte(LM.getStart()),
11093 /*IsStringLocation*/true,
11094 getSpecifierRange(startSpecifier, specifierLen));
11095
11096 S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier)
11097 << FixedLM->toString()
11098 << FixItHint::CreateReplacement(LMRange, FixedLM->toString());
11099
11100 } else {
11101 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11102 << LM.toString() << 0,
11103 getLocationOfByte(LM.getStart()),
11104 /*IsStringLocation*/true,
11105 getSpecifierRange(startSpecifier, specifierLen));
11106 }
11107}
11108
11109void CheckFormatHandler::HandleNonStandardConversionSpecifier(
11110 const analyze_format_string::ConversionSpecifier &CS,
11111 const char *startSpecifier, unsigned specifierLen) {
11112 using namespace analyze_format_string;
11113
11114 // See if we know how to fix this conversion specifier.
11115 std::optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
11116 if (FixedCS) {
11117 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11118 << CS.toString() << /*conversion specifier*/1,
11119 getLocationOfByte(CS.getStart()),
11120 /*IsStringLocation*/true,
11121 getSpecifierRange(startSpecifier, specifierLen));
11122
11123 CharSourceRange CSRange = getSpecifierRange(startSpecifier: CS.getStart(), specifierLen: CS.getLength());
11124 S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
11125 << FixedCS->toString()
11126 << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
11127 } else {
11128 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
11129 << CS.toString() << /*conversion specifier*/1,
11130 getLocationOfByte(CS.getStart()),
11131 /*IsStringLocation*/true,
11132 getSpecifierRange(startSpecifier, specifierLen));
11133 }
11134}
11135
11136void CheckFormatHandler::HandlePosition(const char *startPos,
11137 unsigned posLen) {
11138 EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
11139 getLocationOfByte(startPos),
11140 /*IsStringLocation*/true,
11141 getSpecifierRange(startPos, posLen));
11142}
11143
11144void CheckFormatHandler::HandleInvalidPosition(
11145 const char *startSpecifier, unsigned specifierLen,
11146 analyze_format_string::PositionContext p) {
11147 EmitFormatDiagnostic(
11148 S.PDiag(diag::warn_format_invalid_positional_specifier) << (unsigned)p,
11149 getLocationOfByte(startSpecifier), /*IsStringLocation*/ true,
11150 getSpecifierRange(startSpecifier, specifierLen));
11151}
11152
11153void CheckFormatHandler::HandleZeroPosition(const char *startPos,
11154 unsigned posLen) {
11155 EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
11156 getLocationOfByte(startPos),
11157 /*IsStringLocation*/true,
11158 getSpecifierRange(startPos, posLen));
11159}
11160
11161void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
11162 if (!isa<ObjCStringLiteral>(Val: OrigFormatExpr)) {
11163 // The presence of a null character is likely an error.
11164 EmitFormatDiagnostic(
11165 S.PDiag(diag::warn_printf_format_string_contains_null_char),
11166 getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
11167 getFormatStringRange());
11168 }
11169}
11170
11171// Note that this may return NULL if there was an error parsing or building
11172// one of the argument expressions.
11173const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
11174 return Args[FirstDataArg + i];
11175}
11176
11177void CheckFormatHandler::DoneProcessing() {
11178 // Does the number of data arguments exceed the number of
11179 // format conversions in the format string?
11180 if (ArgPassingKind != Sema::FAPK_VAList) {
11181 // Find any arguments that weren't covered.
11182 CoveredArgs.flip();
11183 signed notCoveredArg = CoveredArgs.find_first();
11184 if (notCoveredArg >= 0) {
11185 assert((unsigned)notCoveredArg < NumDataArgs);
11186 UncoveredArg.Update(NewFirstUncoveredArg: notCoveredArg, StrExpr: OrigFormatExpr);
11187 } else {
11188 UncoveredArg.setAllCovered();
11189 }
11190 }
11191}
11192
11193void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall,
11194 const Expr *ArgExpr) {
11195 assert(hasUncoveredArg() && !DiagnosticExprs.empty() &&
11196 "Invalid state");
11197
11198 if (!ArgExpr)
11199 return;
11200
11201 SourceLocation Loc = ArgExpr->getBeginLoc();
11202
11203 if (S.getSourceManager().isInSystemMacro(loc: Loc))
11204 return;
11205
11206 PartialDiagnostic PDiag = S.PDiag(diag::warn_printf_data_arg_not_used);
11207 for (auto E : DiagnosticExprs)
11208 PDiag << E->getSourceRange();
11209
11210 CheckFormatHandler::EmitFormatDiagnostic(
11211 S, IsFunctionCall, DiagnosticExprs[0],
11212 PDiag, Loc, /*IsStringLocation*/false,
11213 DiagnosticExprs[0]->getSourceRange());
11214}
11215
11216bool
11217CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
11218 SourceLocation Loc,
11219 const char *startSpec,
11220 unsigned specifierLen,
11221 const char *csStart,
11222 unsigned csLen) {
11223 bool keepGoing = true;
11224 if (argIndex < NumDataArgs) {
11225 // Consider the argument covered, even though the specifier doesn't
11226 // make sense.
11227 CoveredArgs.set(argIndex);
11228 }
11229 else {
11230 // If argIndex exceeds the number of data arguments, we
11231 // don't issue a warning because that is just a cascade of warnings (and
11232 // they may have intended '%%' anyway). We don't want to continue processing
11233 // the format string after this point, however, as we will likely just get
11234 // gibberish when trying to match arguments.
11235 keepGoing = false;
11236 }
11237
11238 StringRef Specifier(csStart, csLen);
11239
11240 // If the specifier is non-printable, it could be the first byte of a UTF-8
11241 // sequence. In that case, print the UTF-8 code point. If not, print the
11242 // byte's hex value.
11243 std::string CodePointStr;
11244 if (!llvm::sys::locale::isPrint(c: *csStart)) {
11245 llvm::UTF32 CodePoint;
11246 const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
11247 const llvm::UTF8 *E =
11248 reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
11249 llvm::ConversionResult Result =
11250 llvm::convertUTF8Sequence(source: B, sourceEnd: E, target: &CodePoint, flags: llvm::strictConversion);
11251
11252 if (Result != llvm::conversionOK) {
11253 unsigned char FirstChar = *csStart;
11254 CodePoint = (llvm::UTF32)FirstChar;
11255 }
11256
11257 llvm::raw_string_ostream OS(CodePointStr);
11258 if (CodePoint < 256)
11259 OS << "\\x" << llvm::format(Fmt: "%02x", Vals: CodePoint);
11260 else if (CodePoint <= 0xFFFF)
11261 OS << "\\u" << llvm::format(Fmt: "%04x", Vals: CodePoint);
11262 else
11263 OS << "\\U" << llvm::format(Fmt: "%08x", Vals: CodePoint);
11264 OS.flush();
11265 Specifier = CodePointStr;
11266 }
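// Illustrative example: an invalid specifier starting with the bytes
// 0xC3 0xA9 (U+00E9, 'e' with acute) is reported as "\u00e9", while a lone
// invalid byte such as 0x80 fails conversion and falls back to the "\x80"
// form.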
11267
11268 EmitFormatDiagnostic(
11269 S.PDiag(diag::warn_format_invalid_conversion) << Specifier, Loc,
11270 /*IsStringLocation*/ true, getSpecifierRange(startSpec, specifierLen));
11271
11272 return keepGoing;
11273}
11274
11275void
11276CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
11277 const char *startSpec,
11278 unsigned specifierLen) {
11279 EmitFormatDiagnostic(
11280 S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
11281 Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
11282}
11283
11284bool
11285CheckFormatHandler::CheckNumArgs(
11286 const analyze_format_string::FormatSpecifier &FS,
11287 const analyze_format_string::ConversionSpecifier &CS,
11288 const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
11289
11290 if (argIndex >= NumDataArgs) {
11291 PartialDiagnostic PDiag = FS.usesPositionalArg()
11292 ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
11293 << (argIndex+1) << NumDataArgs)
11294 : S.PDiag(diag::warn_printf_insufficient_data_args);
11295 EmitFormatDiagnostic(
11296 PDiag, Loc: getLocationOfByte(x: CS.getStart()), /*IsStringLocation*/true,
11297 StringRange: getSpecifierRange(startSpecifier, specifierLen));
11298
11299 // Since the format string consumes more arguments than were given, by
11300 // extension all of the supplied arguments are covered, so mark them as such.
11301 UncoveredArg.setAllCovered();
11302 return false;
11303 }
11304 return true;
11305}
11306
11307template<typename Range>
11308void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
11309 SourceLocation Loc,
11310 bool IsStringLocation,
11311 Range StringRange,
11312 ArrayRef<FixItHint> FixIt) {
11313 EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
11314 Loc, IsStringLocation, StringRange, FixIt);
11315}
11316
11317 /// If the format string is not within the function call, emit a note
11318 /// so that both the function call and the format string appear in diagnostics.
11319///
11320/// \param InFunctionCall if true, the format string is within the function
11321/// call and only one diagnostic message will be produced. Otherwise, an
11322 /// extra note will be emitted pointing to the location of the format string.
11323///
11324/// \param ArgumentExpr the expression that is passed as the format string
11325/// argument in the function call. Used for getting locations when two
11326/// diagnostics are emitted.
11327///
11328 /// \param PDiag the caller should already have provided any strings for the
11329/// diagnostic message. This function only adds locations and fixits
11330/// to diagnostics.
11331///
11332/// \param Loc primary location for diagnostic. If two diagnostics are
11333/// required, one will be at Loc and a new SourceLocation will be created for
11334/// the other one.
11335///
11336 /// \param IsStringLocation if true, Loc points into the format string and
11337 /// will be used for the note. Otherwise, Loc points to the argument list and
11338 /// will be used with PDiag.
11339///
11340/// \param StringRange some or all of the string to highlight. This is
11341/// templated so it can accept either a CharSourceRange or a SourceRange.
11342///
11343/// \param FixIt optional fix it hint for the format string.
11344template <typename Range>
11345void CheckFormatHandler::EmitFormatDiagnostic(
11346 Sema &S, bool InFunctionCall, const Expr *ArgumentExpr,
11347 const PartialDiagnostic &PDiag, SourceLocation Loc, bool IsStringLocation,
11348 Range StringRange, ArrayRef<FixItHint> FixIt) {
11349 if (InFunctionCall) {
11350 const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PD: PDiag);
11351 D << StringRange;
11352 D << FixIt;
11353 } else {
11354 S.Diag(Loc: IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PD: PDiag)
11355 << ArgumentExpr->getSourceRange();
11356
11357 const Sema::SemaDiagnosticBuilder &Note =
11358 S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
11359 diag::note_format_string_defined);
11360
11361 Note << StringRange;
11362 Note << FixIt;
11363 }
11364}
11365
11366//===--- CHECK: Printf format string checking ------------------------------===//
11367
11368namespace {
11369
11370class CheckPrintfHandler : public CheckFormatHandler {
11371public:
11372 CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
11373 const Expr *origFormatExpr,
11374 const Sema::FormatStringType type, unsigned firstDataArg,
11375 unsigned numDataArgs, bool isObjC, const char *beg,
11376 Sema::FormatArgumentPassingKind APK,
11377 ArrayRef<const Expr *> Args, unsigned formatIdx,
11378 bool inFunctionCall, Sema::VariadicCallType CallType,
11379 llvm::SmallBitVector &CheckedVarArgs,
11380 UncoveredArgHandler &UncoveredArg)
11381 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
11382 numDataArgs, beg, APK, Args, formatIdx,
11383 inFunctionCall, CallType, CheckedVarArgs,
11384 UncoveredArg) {}
11385
11386 bool isObjCContext() const { return FSType == Sema::FST_NSString; }
11387
11388 /// Returns true if '%@' specifiers are allowed in the format string.
11389 bool allowsObjCArg() const {
11390 return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
11391 FSType == Sema::FST_OSTrace;
11392 }
11393
11394 bool HandleInvalidPrintfConversionSpecifier(
11395 const analyze_printf::PrintfSpecifier &FS,
11396 const char *startSpecifier,
11397 unsigned specifierLen) override;
11398
11399 void handleInvalidMaskType(StringRef MaskType) override;
11400
11401 bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
11402 const char *startSpecifier, unsigned specifierLen,
11403 const TargetInfo &Target) override;
11404 bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
11405 const char *StartSpecifier,
11406 unsigned SpecifierLen,
11407 const Expr *E);
11408
11409 bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
11410 const char *startSpecifier, unsigned specifierLen);
11411 void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
11412 const analyze_printf::OptionalAmount &Amt,
11413 unsigned type,
11414 const char *startSpecifier, unsigned specifierLen);
11415 void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
11416 const analyze_printf::OptionalFlag &flag,
11417 const char *startSpecifier, unsigned specifierLen);
11418 void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
11419 const analyze_printf::OptionalFlag &ignoredFlag,
11420 const analyze_printf::OptionalFlag &flag,
11421 const char *startSpecifier, unsigned specifierLen);
11422 bool checkForCStrMembers(const analyze_printf::ArgType &AT,
11423 const Expr *E);
11424
11425 void HandleEmptyObjCModifierFlag(const char *startFlag,
11426 unsigned flagLen) override;
11427
11428 void HandleInvalidObjCModifierFlag(const char *startFlag,
11429 unsigned flagLen) override;
11430
11431 void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart,
11432 const char *flagsEnd,
11433 const char *conversionPosition)
11434 override;
11435};
11436
11437} // namespace
11438
11439bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
11440 const analyze_printf::PrintfSpecifier &FS,
11441 const char *startSpecifier,
11442 unsigned specifierLen) {
11443 const analyze_printf::PrintfConversionSpecifier &CS =
11444 FS.getConversionSpecifier();
11445
11446 return HandleInvalidConversionSpecifier(argIndex: FS.getArgIndex(),
11447 Loc: getLocationOfByte(x: CS.getStart()),
11448 startSpec: startSpecifier, specifierLen,
11449 csStart: CS.getStart(), csLen: CS.getLength());
11450}
11451
11452void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) {
11453 S.Diag(getLocationOfByte(MaskType.data()), diag::err_invalid_mask_type_size);
11454}
11455
11456bool CheckPrintfHandler::HandleAmount(
11457 const analyze_format_string::OptionalAmount &Amt, unsigned k,
11458 const char *startSpecifier, unsigned specifierLen) {
11459 if (Amt.hasDataArgument()) {
11460 if (ArgPassingKind != Sema::FAPK_VAList) {
11461 unsigned argIndex = Amt.getArgIndex();
11462 if (argIndex >= NumDataArgs) {
11463 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
11464 << k,
11465 getLocationOfByte(Amt.getStart()),
11466 /*IsStringLocation*/ true,
11467 getSpecifierRange(startSpecifier, specifierLen));
11468 // Don't do any more checking. We will just emit
11469 // spurious errors.
11470 return false;
11471 }
11472
11473 // Type check the data argument. It should be an 'int'.
11474 // Although not in conformance with C99, we also allow the argument to be
11475 // an 'unsigned int' as that is a reasonably safe case. GCC also
11476 // doesn't emit a warning for that case.
11477 CoveredArgs.set(argIndex);
11478 const Expr *Arg = getDataArg(i: argIndex);
11479 if (!Arg)
11480 return false;
11481
11482 QualType T = Arg->getType();
11483
11484 const analyze_printf::ArgType &AT = Amt.getArgType(Ctx&: S.Context);
11485 assert(AT.isValid());
11486
11487 if (!AT.matchesType(C&: S.Context, argTy: T)) {
11488 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
11489 << k << AT.getRepresentativeTypeName(S.Context)
11490 << T << Arg->getSourceRange(),
11491 getLocationOfByte(Amt.getStart()),
11492 /*IsStringLocation*/true,
11493 getSpecifierRange(startSpecifier, specifierLen));
11494 // Don't do any more checking. We will just emit
11495 // spurious errors.
11496 return false;
11497 }
11498 }
11499 }
11500 return true;
11501}
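// Illustrative examples (not from the original source):
//   printf("%*d", width, n);  /* fine when 'width' is an int or unsigned */
//   printf("%.*f", 2.5, x);   /* '*' precision expects an int: warned */
//   printf("%*d");            /* no argument for the '*' width: warned */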
11502
11503void CheckPrintfHandler::HandleInvalidAmount(
11504 const analyze_printf::PrintfSpecifier &FS,
11505 const analyze_printf::OptionalAmount &Amt,
11506 unsigned type,
11507 const char *startSpecifier,
11508 unsigned specifierLen) {
11509 const analyze_printf::PrintfConversionSpecifier &CS =
11510 FS.getConversionSpecifier();
11511
11512 FixItHint fixit =
11513 Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
11514 ? FixItHint::CreateRemoval(RemoveRange: getSpecifierRange(startSpecifier: Amt.getStart(),
11515 specifierLen: Amt.getConstantLength()))
11516 : FixItHint();
11517
11518 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
11519 << type << CS.toString(),
11520 getLocationOfByte(Amt.getStart()),
11521 /*IsStringLocation*/true,
11522 getSpecifierRange(startSpecifier, specifierLen),
11523 fixit);
11524}
11525
11526void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
11527 const analyze_printf::OptionalFlag &flag,
11528 const char *startSpecifier,
11529 unsigned specifierLen) {
11530 // Warn about pointless flag with a fixit removal.
11531 const analyze_printf::PrintfConversionSpecifier &CS =
11532 FS.getConversionSpecifier();
11533 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
11534 << flag.toString() << CS.toString(),
11535 getLocationOfByte(flag.getPosition()),
11536 /*IsStringLocation*/true,
11537 getSpecifierRange(startSpecifier, specifierLen),
11538 FixItHint::CreateRemoval(
11539 getSpecifierRange(flag.getPosition(), 1)));
11540}
11541
11542void CheckPrintfHandler::HandleIgnoredFlag(
11543 const analyze_printf::PrintfSpecifier &FS,
11544 const analyze_printf::OptionalFlag &ignoredFlag,
11545 const analyze_printf::OptionalFlag &flag,
11546 const char *startSpecifier,
11547 unsigned specifierLen) {
11548 // Warn about ignored flag with a fixit removal.
11549 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
11550 << ignoredFlag.toString() << flag.toString(),
11551 getLocationOfByte(ignoredFlag.getPosition()),
11552 /*IsStringLocation*/true,
11553 getSpecifierRange(startSpecifier, specifierLen),
11554 FixItHint::CreateRemoval(
11555 getSpecifierRange(ignoredFlag.getPosition(), 1)));
11556}
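// Illustrative examples: printf("% +d", n) warns that ' ' is ignored by '+',
// and printf("%0-8d", n) warns that '0' is ignored by '-'; each warning comes
// with a fixit removing the ignored flag.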
11557
11558void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag,
11559 unsigned flagLen) {
11560 // Warn about an empty flag.
11561 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag),
11562 getLocationOfByte(startFlag),
11563 /*IsStringLocation*/true,
11564 getSpecifierRange(startFlag, flagLen));
11565}
11566
11567void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag,
11568 unsigned flagLen) {
11569 // Warn about an invalid flag.
11570 auto Range = getSpecifierRange(startSpecifier: startFlag, specifierLen: flagLen);
11571 StringRef flag(startFlag, flagLen);
11572 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag,
11573 getLocationOfByte(startFlag),
11574 /*IsStringLocation*/true,
11575 Range, FixItHint::CreateRemoval(Range));
11576}
11577
11578void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion(
11579 const char *flagsStart, const char *flagsEnd, const char *conversionPosition) {
11580 // Warn about using '[...]' without a '@' conversion.
11581 auto Range = getSpecifierRange(startSpecifier: flagsStart, specifierLen: flagsEnd - flagsStart + 1);
11582 auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion;
11583 EmitFormatDiagnostic(S.PDiag(DiagID: diag) << StringRef(conversionPosition, 1),
11584 getLocationOfByte(x: conversionPosition),
11585 /*IsStringLocation*/true,
11586 Range, FixItHint::CreateRemoval(RemoveRange: Range));
11587}
11588
11589 // Determines whether the specified type is a C++ class or struct containing
11590// a member with the specified name and kind (e.g. a CXXMethodDecl named
11591// "c_str()").
11592template<typename MemberKind>
11593static llvm::SmallPtrSet<MemberKind*, 1>
11594CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) {
11595 const RecordType *RT = Ty->getAs<RecordType>();
11596 llvm::SmallPtrSet<MemberKind*, 1> Results;
11597
11598 if (!RT)
11599 return Results;
11600 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Val: RT->getDecl());
11601 if (!RD || !RD->getDefinition())
11602 return Results;
11603
11604 LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(),
11605 Sema::LookupMemberName);
11606 R.suppressDiagnostics();
11607
11608 // At this point, we just need to include all members of the right kind that
11609 // the lookup turned up.
11610 if (S.LookupQualifiedName(R, RT->getDecl()))
11611 for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
11612 NamedDecl *decl = (*I)->getUnderlyingDecl();
11613 if (MemberKind *FK = dyn_cast<MemberKind>(decl))
11614 Results.insert(FK);
11615 }
11616 return Results;
11617}
11618
11619/// Check if we could call '.c_str()' on an object.
11620///
11621/// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't
11622/// allow the call, or if it would be ambiguous).
11623bool Sema::hasCStrMethod(const Expr *E) {
11624 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
11625
11626 MethodSet Results =
11627 CXXRecordMembersNamed<CXXMethodDecl>(Name: "c_str", S&: *this, Ty: E->getType());
11628 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
11629 MI != ME; ++MI)
11630 if ((*MI)->getMinRequiredArguments() == 0)
11631 return true;
11632 return false;
11633}
11634
11635// Check if a (w)string was passed when a (w)char* was needed, and offer a
11636// better diagnostic if so. AT is assumed to be valid.
11637// Returns true when a c_str() conversion method is found.
11638bool CheckPrintfHandler::checkForCStrMembers(
11639 const analyze_printf::ArgType &AT, const Expr *E) {
11640 using MethodSet = llvm::SmallPtrSet<CXXMethodDecl *, 1>;
11641
11642 MethodSet Results =
11643 CXXRecordMembersNamed<CXXMethodDecl>(Name: "c_str", S, Ty: E->getType());
11644
11645 for (MethodSet::iterator MI = Results.begin(), ME = Results.end();
11646 MI != ME; ++MI) {
11647 const CXXMethodDecl *Method = *MI;
11648 if (Method->getMinRequiredArguments() == 0 &&
11649 AT.matchesType(C&: S.Context, argTy: Method->getReturnType())) {
11650 // FIXME: Suggest parens if the expression needs them.
11651 SourceLocation EndLoc = S.getLocForEndOfToken(Loc: E->getEndLoc());
11652 S.Diag(E->getBeginLoc(), diag::note_printf_c_str)
11653 << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()");
11654 return true;
11655 }
11656 }
11657
11658 return false;
11659}
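// Illustrative example (assumption): for std::string Name, a call such as
// printf("%s", Name) gets a note suggesting Name.c_str(), because c_str()
// takes no arguments and returns a type matching the %s argument type.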
11660
11661bool CheckPrintfHandler::HandlePrintfSpecifier(
11662 const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier,
11663 unsigned specifierLen, const TargetInfo &Target) {
11664 using namespace analyze_format_string;
11665 using namespace analyze_printf;
11666
11667 const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
11668
11669 if (FS.consumesDataArgument()) {
11670 if (atFirstArg) {
11671 atFirstArg = false;
11672 usesPositionalArgs = FS.usesPositionalArg();
11673 }
11674 else if (usesPositionalArgs != FS.usesPositionalArg()) {
11675 HandlePositionalNonpositionalArgs(Loc: getLocationOfByte(x: CS.getStart()),
11676 startSpec: startSpecifier, specifierLen);
11677 return false;
11678 }
11679 }
11680
11681 // First check if the field width, precision, and conversion specifier
11682 // have matching data arguments.
11683 if (!HandleAmount(Amt: FS.getFieldWidth(), /* field width */ k: 0,
11684 startSpecifier, specifierLen)) {
11685 return false;
11686 }
11687
11688 if (!HandleAmount(Amt: FS.getPrecision(), /* precision */ k: 1,
11689 startSpecifier, specifierLen)) {
11690 return false;
11691 }
11692
11693 if (!CS.consumesDataArgument()) {
11694 // FIXME: Technically specifying a precision or field width here
11695 // makes no sense. Worth issuing a warning at some point.
11696 return true;
11697 }
11698
11699 // Consume the argument.
11700 unsigned argIndex = FS.getArgIndex();
11701 if (argIndex < NumDataArgs) {
11702 // The check to see if the argIndex is valid will come later.
11703 // We set the bit here because we may exit early from this
11704 // function if we encounter some other error.
11705 CoveredArgs.set(argIndex);
11706 }
11707
11708 // FreeBSD kernel extensions.
11709 if (CS.getKind() == ConversionSpecifier::FreeBSDbArg ||
11710 CS.getKind() == ConversionSpecifier::FreeBSDDArg) {
11711 // We need at least two arguments.
11712 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex: argIndex + 1))
11713 return false;
11714
11715 // Claim the second argument.
11716 CoveredArgs.set(argIndex + 1);
11717
11718 // Type check the first argument (int for %b, pointer for %D)
11719 const Expr *Ex = getDataArg(i: argIndex);
11720 const analyze_printf::ArgType &AT =
11721 (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ?
11722 ArgType(S.Context.IntTy) : ArgType::CPointerTy;
11723 if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType()))
11724 EmitFormatDiagnostic(
11725 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
11726 << AT.getRepresentativeTypeName(S.Context) << Ex->getType()
11727 << false << Ex->getSourceRange(),
11728 Ex->getBeginLoc(), /*IsStringLocation*/ false,
11729 getSpecifierRange(startSpecifier, specifierLen));
11730
11731 // Type check the second argument (char * for both %b and %D)
11732 Ex = getDataArg(i: argIndex + 1);
11733 const analyze_printf::ArgType &AT2 = ArgType::CStrTy;
11734 if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType()))
11735 EmitFormatDiagnostic(
11736 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
11737 << AT2.getRepresentativeTypeName(S.Context) << Ex->getType()
11738 << false << Ex->getSourceRange(),
11739 Ex->getBeginLoc(), /*IsStringLocation*/ false,
11740 getSpecifierRange(startSpecifier, specifierLen));
11741
11742 return true;
11743 }
11744
11745 // Check for using an Objective-C specific conversion specifier
11746 // in a non-ObjC literal.
11747 if (!allowsObjCArg() && CS.isObjCArg()) {
11748 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11749 specifierLen);
11750 }
11751
11752 // %P can only be used with os_log.
11753 if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
11754 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11755 specifierLen);
11756 }
11757
11758 // %n is not allowed with os_log.
11759 if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
11760 EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
11761 getLocationOfByte(CS.getStart()),
11762 /*IsStringLocation*/ false,
11763 getSpecifierRange(startSpecifier, specifierLen));
11764
11765 return true;
11766 }
11767
11768 // Only scalars are allowed for os_trace.
11769 if (FSType == Sema::FST_OSTrace &&
11770 (CS.getKind() == ConversionSpecifier::PArg ||
11771 CS.getKind() == ConversionSpecifier::sArg ||
11772 CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
11773 return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
11774 specifierLen);
11775 }
11776
11777 // Check for use of public/private annotation outside of os_log().
11778 if (FSType != Sema::FST_OSLog) {
11779 if (FS.isPublic().isSet()) {
11780 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
11781 << "public",
11782 getLocationOfByte(FS.isPublic().getPosition()),
11783 /*IsStringLocation*/ false,
11784 getSpecifierRange(startSpecifier, specifierLen));
11785 }
11786 if (FS.isPrivate().isSet()) {
11787 EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
11788 << "private",
11789 getLocationOfByte(FS.isPrivate().getPosition()),
11790 /*IsStringLocation*/ false,
11791 getSpecifierRange(startSpecifier, specifierLen));
11792 }
11793 }
11794
11795 const llvm::Triple &Triple = Target.getTriple();
11796 if (CS.getKind() == ConversionSpecifier::nArg &&
11797 (Triple.isAndroid() || Triple.isOSFuchsia())) {
11798 EmitFormatDiagnostic(S.PDiag(diag::warn_printf_narg_not_supported),
11799 getLocationOfByte(CS.getStart()),
11800 /*IsStringLocation*/ false,
11801 getSpecifierRange(startSpecifier, specifierLen));
11802 }
11803
11804 // Check for invalid use of field width
11805 if (!FS.hasValidFieldWidth()) {
11806 HandleInvalidAmount(FS, Amt: FS.getFieldWidth(), /* field width */ type: 0,
11807 startSpecifier, specifierLen);
11808 }
11809
11810 // Check for invalid use of precision
11811 if (!FS.hasValidPrecision()) {
11812 HandleInvalidAmount(FS, Amt: FS.getPrecision(), /* precision */ type: 1,
11813 startSpecifier, specifierLen);
11814 }
11815
11816 // Precision is mandatory for %P specifier.
11817 if (CS.getKind() == ConversionSpecifier::PArg &&
11818 FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
11819 EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
11820 getLocationOfByte(startSpecifier),
11821 /*IsStringLocation*/ false,
11822 getSpecifierRange(startSpecifier, specifierLen));
11823 }
11824
11825 // Check each flag does not conflict with any other component.
11826 if (!FS.hasValidThousandsGroupingPrefix())
11827 HandleFlag(FS, flag: FS.hasThousandsGrouping(), startSpecifier, specifierLen);
11828 if (!FS.hasValidLeadingZeros())
11829 HandleFlag(FS, flag: FS.hasLeadingZeros(), startSpecifier, specifierLen);
11830 if (!FS.hasValidPlusPrefix())
11831 HandleFlag(FS, flag: FS.hasPlusPrefix(), startSpecifier, specifierLen);
11832 if (!FS.hasValidSpacePrefix())
11833 HandleFlag(FS, flag: FS.hasSpacePrefix(), startSpecifier, specifierLen);
11834 if (!FS.hasValidAlternativeForm())
11835 HandleFlag(FS, flag: FS.hasAlternativeForm(), startSpecifier, specifierLen);
11836 if (!FS.hasValidLeftJustified())
11837 HandleFlag(FS, flag: FS.isLeftJustified(), startSpecifier, specifierLen);
11838
11839 // Check that flags are not ignored by another flag
11840 if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
11841 HandleIgnoredFlag(FS, ignoredFlag: FS.hasSpacePrefix(), flag: FS.hasPlusPrefix(),
11842 startSpecifier, specifierLen);
11843 if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
11844 HandleIgnoredFlag(FS, ignoredFlag: FS.hasLeadingZeros(), flag: FS.isLeftJustified(),
11845 startSpecifier, specifierLen);
11846
11847 // Check the length modifier is valid with the given conversion specifier.
11848 if (!FS.hasValidLengthModifier(Target: S.getASTContext().getTargetInfo(),
11849 LO: S.getLangOpts()))
11850 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
11851 diag::warn_format_nonsensical_length);
11852 else if (!FS.hasStandardLengthModifier())
11853 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
11854 else if (!FS.hasStandardLengthConversionCombination())
11855 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
11856 diag::warn_format_non_standard_conversion_spec);
11857
11858 if (!FS.hasStandardConversionSpecifier(LangOpt: S.getLangOpts()))
11859 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
11860
11861 // The remaining checks depend on the data arguments.
11862 if (ArgPassingKind == Sema::FAPK_VAList)
11863 return true;
11864
11865 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
11866 return false;
11867
11868 const Expr *Arg = getDataArg(i: argIndex);
11869 if (!Arg)
11870 return true;
11871
11872 return checkFormatExpr(FS, StartSpecifier: startSpecifier, SpecifierLen: specifierLen, E: Arg);
11873}
11874
11875static bool requiresParensToAddCast(const Expr *E) {
11876 // FIXME: We should have a general way to reason about operator
11877 // precedence and whether parens are actually needed here.
11878 // Take care of a few common cases where they aren't.
11879 const Expr *Inside = E->IgnoreImpCasts();
11880 if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Val: Inside))
11881 Inside = POE->getSyntacticForm()->IgnoreImpCasts();
11882
11883 switch (Inside->getStmtClass()) {
11884 case Stmt::ArraySubscriptExprClass:
11885 case Stmt::CallExprClass:
11886 case Stmt::CharacterLiteralClass:
11887 case Stmt::CXXBoolLiteralExprClass:
11888 case Stmt::DeclRefExprClass:
11889 case Stmt::FloatingLiteralClass:
11890 case Stmt::IntegerLiteralClass:
11891 case Stmt::MemberExprClass:
11892 case Stmt::ObjCArrayLiteralClass:
11893 case Stmt::ObjCBoolLiteralExprClass:
11894 case Stmt::ObjCBoxedExprClass:
11895 case Stmt::ObjCDictionaryLiteralClass:
11896 case Stmt::ObjCEncodeExprClass:
11897 case Stmt::ObjCIvarRefExprClass:
11898 case Stmt::ObjCMessageExprClass:
11899 case Stmt::ObjCPropertyRefExprClass:
11900 case Stmt::ObjCStringLiteralClass:
11901 case Stmt::ObjCSubscriptRefExprClass:
11902 case Stmt::ParenExprClass:
11903 case Stmt::StringLiteralClass:
11904 case Stmt::UnaryOperatorClass:
11905 return false;
11906 default:
11907 return true;
11908 }
11909}
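// Illustrative example: when suggesting a cast for printf("%ld", a + b), the
// fixit needs "(long)(a + b)", whereas a plain DeclRefExpr such as 'a' can be
// cast without parentheses: "(long)a".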
11910
11911static std::pair<QualType, StringRef>
11912shouldNotPrintDirectly(const ASTContext &Context,
11913 QualType IntendedTy,
11914 const Expr *E) {
11915 // Use a 'while' to peel off layers of typedefs.
11916 QualType TyTy = IntendedTy;
11917 while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
11918 StringRef Name = UserTy->getDecl()->getName();
11919 QualType CastTy = llvm::StringSwitch<QualType>(Name)
11920 .Case(S: "CFIndex", Value: Context.getNSIntegerType())
11921 .Case(S: "NSInteger", Value: Context.getNSIntegerType())
11922 .Case(S: "NSUInteger", Value: Context.getNSUIntegerType())
11923 .Case(S: "SInt32", Value: Context.IntTy)
11924 .Case("UInt32", Context.UnsignedIntTy)
11925 .Default(QualType());
11926
11927 if (!CastTy.isNull())
11928 return std::make_pair(x&: CastTy, y&: Name);
11929
11930 TyTy = UserTy->desugar();
11931 }
11932
11933 // Strip parens if necessary.
11934 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
11935 return shouldNotPrintDirectly(Context,
11936 PE->getSubExpr()->getType(),
11937 PE->getSubExpr());
11938
11939 // If this is a conditional expression, then its result type is constructed
11940 // via the usual arithmetic conversions and thus the necessary typedef sugar
11941 // might be gone. Recurse into the operands to check for NSInteger & co.
11942 // usage there.
11943 if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Val: E)) {
11944 QualType TrueTy, FalseTy;
11945 StringRef TrueName, FalseName;
11946
11947 std::tie(TrueTy, TrueName) =
11948 shouldNotPrintDirectly(Context,
11949 CO->getTrueExpr()->getType(),
11950 CO->getTrueExpr());
11951 std::tie(FalseTy, FalseName) =
11952 shouldNotPrintDirectly(Context,
11953 CO->getFalseExpr()->getType(),
11954 CO->getFalseExpr());
11955
11956 if (TrueTy == FalseTy)
11957 return std::make_pair(x&: TrueTy, y&: TrueName);
11958 else if (TrueTy.isNull())
11959 return std::make_pair(x&: FalseTy, y&: FalseName);
11960 else if (FalseTy.isNull())
11961 return std::make_pair(x&: TrueTy, y&: TrueName);
11962 }
11963
11964 return std::make_pair(x: QualType(), y: StringRef());
11965}
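// Illustrative example (assumption): for
//   NSInteger n = ...;  NSLog(@"%d", n);
// this returns {Context.getNSIntegerType(), "NSInteger"}, which the caller
// uses to recommend an explicit cast rather than printing the typedef
// directly.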
11966
11967/// Return true if \p ICE is an implicit argument promotion of an arithmetic
11968/// type. Bit-field 'promotions' from a higher ranked type to a lower ranked
11969/// type do not count.
11970static bool
11971isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) {
11972 QualType From = ICE->getSubExpr()->getType();
11973 QualType To = ICE->getType();
11974 // It's an integer promotion if the destination type is the promoted
11975 // source type.
11976 if (ICE->getCastKind() == CK_IntegralCast &&
11977 S.Context.isPromotableIntegerType(T: From) &&
11978 S.Context.getPromotedIntegerType(PromotableType: From) == To)
11979 return true;
11980 // Look through vector types, since we do default argument promotion for
11981 // those in OpenCL.
11982 if (const auto *VecTy = From->getAs<ExtVectorType>())
11983 From = VecTy->getElementType();
11984 if (const auto *VecTy = To->getAs<ExtVectorType>())
11985 To = VecTy->getElementType();
11986 // It's a floating promotion if the source type is a lower rank.
11987 return ICE->getCastKind() == CK_FloatingCast &&
11988 S.Context.getFloatingTypeOrder(LHS: From, RHS: To) < 0;
11989}
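// Illustrative example: passing a 'short' to printf inserts an
// ImplicitCastExpr to 'int' (CK_IntegralCast to the promoted type), for which
// this returns true; a float -> double CK_FloatingCast also counts, but an
// array-to-pointer decay does not.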
11990
11991bool
11992CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
11993 const char *StartSpecifier,
11994 unsigned SpecifierLen,
11995 const Expr *E) {
11996 using namespace analyze_format_string;
11997 using namespace analyze_printf;
11998
11999 // Now type check the data expression that matches the
12000 // format specifier.
12001 const analyze_printf::ArgType &AT = FS.getArgType(Ctx&: S.Context, IsObjCLiteral: isObjCContext());
12002 if (!AT.isValid())
12003 return true;
12004
12005 QualType ExprTy = E->getType();
12006 while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(Val&: ExprTy)) {
12007 ExprTy = TET->getUnderlyingExpr()->getType();
12008 }
12009
12010 // When using the format attribute in C++, you can receive a function or an
12011 // array that will necessarily decay to a pointer when passed to the final
12012 // format consumer. Apply decay before type comparison.
12013 if (ExprTy->canDecayToPointerType())
12014 ExprTy = S.Context.getDecayedType(T: ExprTy);
12015
12016 // Diagnose attempts to print a boolean value as a character. Unlike other
12017 // -Wformat diagnostics, this is fine from a type perspective, but it still
12018 // doesn't make sense.
12019 if (FS.getConversionSpecifier().getKind() == ConversionSpecifier::cArg &&
12020 E->isKnownToHaveBooleanValue()) {
12021 const CharSourceRange &CSR =
12022 getSpecifierRange(startSpecifier: StartSpecifier, specifierLen: SpecifierLen);
12023 SmallString<4> FSString;
12024 llvm::raw_svector_ostream os(FSString);
12025 FS.toString(os);
12026 EmitFormatDiagnostic(S.PDiag(diag::warn_format_bool_as_character)
12027 << FSString,
12028 E->getExprLoc(), false, CSR);
12029 return true;
12030 }
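// Illustrative example: printf("%c", a == b) is diagnosed here even though
// the comparison has type 'int' in C, because its value is known to be
// boolean.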
12031
12032 ArgType::MatchKind ImplicitMatch = ArgType::NoMatch;
12033 ArgType::MatchKind Match = AT.matchesType(C&: S.Context, argTy: ExprTy);
12034 if (Match == ArgType::Match)
12035 return true;
12036
12037  // NoMatchPromotionTypeConfusion is only ever returned for an ImplicitCastExpr.
12038 assert(Match != ArgType::NoMatchPromotionTypeConfusion);
12039
12040 // Look through argument promotions for our error message's reported type.
12041 // This includes the integral and floating promotions, but excludes array
12042 // and function pointer decay (seeing that an argument intended to be a
12043 // string has type 'char [6]' is probably more confusing than 'char *') and
12044 // certain bitfield promotions (bitfields can be 'demoted' to a lesser type).
12045 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
12046 if (isArithmeticArgumentPromotion(S, ICE)) {
12047 E = ICE->getSubExpr();
12048 ExprTy = E->getType();
12049
12050 // Check if we didn't match because of an implicit cast from a 'char'
12051 // or 'short' to an 'int'. This is done because printf is a varargs
12052 // function.
12053 if (ICE->getType() == S.Context.IntTy ||
12054 ICE->getType() == S.Context.UnsignedIntTy) {
12055 // All further checking is done on the subexpression
12056 ImplicitMatch = AT.matchesType(C&: S.Context, argTy: ExprTy);
12057 if (ImplicitMatch == ArgType::Match)
12058 return true;
12059 }
12060 }
12061 } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(Val: E)) {
12062 // Special case for 'a', which has type 'int' in C.
12063 // Note, however, that we do /not/ want to treat multibyte constants like
12064    // 'MooV' as characters! This form is deprecated but still exists. In
12065    // addition, don't treat the expression as being of type 'char' if a
12066    // one-byte length modifier ('hh') is provided.
12067 if (ExprTy == S.Context.IntTy &&
12068 FS.getLengthModifier().getKind() != LengthModifier::AsChar)
12069 if (llvm::isUIntN(N: S.Context.getCharWidth(), x: CL->getValue())) {
12070 ExprTy = S.Context.CharTy;
12071 // To improve check results, we consider a character literal in C
12072 // to be a 'char' rather than an 'int'. 'printf("%hd", 'a');' is
12073 // more likely a type confusion situation, so we will suggest to
12074 // use '%hhd' instead by discarding the MatchPromotion.
12075 if (Match == ArgType::MatchPromotion)
12076 Match = ArgType::NoMatch;
12077 }
12078 }
12079 if (Match == ArgType::MatchPromotion) {
12080    // WG14 N2562 only clarified promotions in the *printf family.
12081    // For NSLog in ObjC, just preserve the existing -Wformat behavior.
12082 if (!S.getLangOpts().ObjC &&
12083 ImplicitMatch != ArgType::NoMatchPromotionTypeConfusion &&
12084 ImplicitMatch != ArgType::NoMatchTypeConfusion)
12085 return true;
12086 Match = ArgType::NoMatch;
12087 }
12088 if (ImplicitMatch == ArgType::NoMatchPedantic ||
12089 ImplicitMatch == ArgType::NoMatchTypeConfusion)
12090 Match = ImplicitMatch;
12091 assert(Match != ArgType::MatchPromotion);
12092
12093 // Look through unscoped enums to their underlying type.
12094 bool IsEnum = false;
12095 bool IsScopedEnum = false;
12096 QualType IntendedTy = ExprTy;
12097 if (auto EnumTy = ExprTy->getAs<EnumType>()) {
12098 IntendedTy = EnumTy->getDecl()->getIntegerType();
12099 if (EnumTy->isUnscopedEnumerationType()) {
12100 ExprTy = IntendedTy;
12101 // This controls whether we're talking about the underlying type or not,
12102 // which we only want to do when it's an unscoped enum.
12103 IsEnum = true;
12104 } else {
12105 IsScopedEnum = true;
12106 }
12107 }
12108
12109 // %C in an Objective-C context prints a unichar, not a wchar_t.
12110 // If the argument is an integer of some kind, believe the %C and suggest
12111 // a cast instead of changing the conversion specifier.
12112 if (isObjCContext() &&
12113 FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
12114 if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
12115 !ExprTy->isCharType()) {
12116 // 'unichar' is defined as a typedef of unsigned short, but we should
12117 // prefer using the typedef if it is visible.
12118 IntendedTy = S.Context.UnsignedShortTy;
12119
12120 // While we are here, check if the value is an IntegerLiteral that happens
12121 // to be within the valid range.
12122 if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Val: E)) {
12123 const llvm::APInt &V = IL->getValue();
12124 if (V.getActiveBits() <= S.Context.getTypeSize(T: IntendedTy))
12125 return true;
12126 }
12127
12128 LookupResult Result(S, &S.Context.Idents.get(Name: "unichar"), E->getBeginLoc(),
12129 Sema::LookupOrdinaryName);
12130 if (S.LookupName(R&: Result, S: S.getCurScope())) {
12131 NamedDecl *ND = Result.getFoundDecl();
12132 if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(Val: ND))
12133 if (TD->getUnderlyingType() == IntendedTy)
12134 IntendedTy = S.Context.getTypedefType(Decl: TD);
12135 }
12136 }
12137 }
12138
12139 // Special-case some of Darwin's platform-independence types by suggesting
12140 // casts to primitive types that are known to be large enough.
12141 bool ShouldNotPrintDirectly = false; StringRef CastTyName;
12142 if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
12143 QualType CastTy;
12144 std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E);
12145 if (!CastTy.isNull()) {
12146 // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int
12147 // (long in ASTContext). Only complain to pedants or when they're the
12148 // underlying type of a scoped enum (which always needs a cast).
12149 if (!IsScopedEnum &&
12150 (CastTyName == "NSInteger" || CastTyName == "NSUInteger") &&
12151 (AT.isSizeT() || AT.isPtrdiffT()) &&
12152 AT.matchesType(C&: S.Context, argTy: CastTy))
12153 Match = ArgType::NoMatchPedantic;
12154 IntendedTy = CastTy;
12155 ShouldNotPrintDirectly = true;
12156 }
12157 }
12158
12159 // We may be able to offer a FixItHint if it is a supported type.
12160 PrintfSpecifier fixedFS = FS;
12161 bool Success =
12162 fixedFS.fixType(QT: IntendedTy, LangOpt: S.getLangOpts(), Ctx&: S.Context, IsObjCLiteral: isObjCContext());
12163
12164 if (Success) {
12165 // Get the fix string from the fixed format specifier
12166 SmallString<16> buf;
12167 llvm::raw_svector_ostream os(buf);
12168 fixedFS.toString(os);
12169
12170 CharSourceRange SpecRange = getSpecifierRange(startSpecifier: StartSpecifier, specifierLen: SpecifierLen);
12171
12172 if (IntendedTy == ExprTy && !ShouldNotPrintDirectly && !IsScopedEnum) {
12173 unsigned Diag;
12174 switch (Match) {
12175 case ArgType::Match:
12176 case ArgType::MatchPromotion:
12177 case ArgType::NoMatchPromotionTypeConfusion:
12178 llvm_unreachable("expected non-matching");
12179 case ArgType::NoMatchPedantic:
12180 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
12181 break;
12182 case ArgType::NoMatchTypeConfusion:
12183 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
12184 break;
12185 case ArgType::NoMatch:
12186 Diag = diag::warn_format_conversion_argument_type_mismatch;
12187 break;
12188 }
12189
12190 // In this case, the specifier is wrong and should be changed to match
12191 // the argument.
12192 EmitFormatDiagnostic(S.PDiag(DiagID: Diag)
12193 << AT.getRepresentativeTypeName(C&: S.Context)
12194 << IntendedTy << IsEnum << E->getSourceRange(),
12195 E->getBeginLoc(),
12196 /*IsStringLocation*/ false, SpecRange,
12197 FixItHint::CreateReplacement(RemoveRange: SpecRange, Code: os.str()));
12198 } else {
12199 // The canonical type for formatting this value is different from the
12200 // actual type of the expression. (This occurs, for example, with Darwin's
12201 // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but
12202 // should be printed as 'long' for 64-bit compatibility.)
12203 // Rather than emitting a normal format/argument mismatch, we want to
12204 // add a cast to the recommended type (and correct the format string
12205 // if necessary). We should also do so for scoped enumerations.
12206 SmallString<16> CastBuf;
12207 llvm::raw_svector_ostream CastFix(CastBuf);
12208 CastFix << (S.LangOpts.CPlusPlus ? "static_cast<" : "(");
12209 IntendedTy.print(OS&: CastFix, Policy: S.Context.getPrintingPolicy());
12210 CastFix << (S.LangOpts.CPlusPlus ? ">" : ")");
12211
12212 SmallVector<FixItHint,4> Hints;
12213 if (AT.matchesType(C&: S.Context, argTy: IntendedTy) != ArgType::Match ||
12214 ShouldNotPrintDirectly)
12215 Hints.push_back(Elt: FixItHint::CreateReplacement(RemoveRange: SpecRange, Code: os.str()));
12216
12217 if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(Val: E)) {
12218 // If there's already a cast present, just replace it.
12219 SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc());
12220 Hints.push_back(Elt: FixItHint::CreateReplacement(RemoveRange: CastRange, Code: CastFix.str()));
12221
12222 } else if (!requiresParensToAddCast(E) && !S.LangOpts.CPlusPlus) {
12223 // If the expression has high enough precedence,
12224 // just write the C-style cast.
12225 Hints.push_back(
12226 FixItHint::CreateInsertion(InsertionLoc: E->getBeginLoc(), Code: CastFix.str()));
12227 } else {
12228 // Otherwise, add parens around the expression as well as the cast.
12229 CastFix << "(";
12230 Hints.push_back(
12231 FixItHint::CreateInsertion(InsertionLoc: E->getBeginLoc(), Code: CastFix.str()));
12232
12233 // We don't use getLocForEndOfToken because it returns invalid source
12234 // locations for macro expansions (by design).
12235 SourceLocation EndLoc = S.SourceMgr.getSpellingLoc(Loc: E->getEndLoc());
12236 SourceLocation After = EndLoc.getLocWithOffset(
12237 Offset: Lexer::MeasureTokenLength(Loc: EndLoc, SM: S.SourceMgr, LangOpts: S.LangOpts));
12238 Hints.push_back(Elt: FixItHint::CreateInsertion(InsertionLoc: After, Code: ")"));
12239 }
12240
12241 if (ShouldNotPrintDirectly && !IsScopedEnum) {
12242 // The expression has a type that should not be printed directly.
12243 // We extract the name from the typedef because we don't want to show
12244 // the underlying type in the diagnostic.
12245 StringRef Name;
12246 if (const auto *TypedefTy = ExprTy->getAs<TypedefType>())
12247 Name = TypedefTy->getDecl()->getName();
12248 else
12249 Name = CastTyName;
12250 unsigned Diag = Match == ArgType::NoMatchPedantic
12251 ? diag::warn_format_argument_needs_cast_pedantic
12252 : diag::warn_format_argument_needs_cast;
12253 EmitFormatDiagnostic(S.PDiag(DiagID: Diag) << Name << IntendedTy << IsEnum
12254 << E->getSourceRange(),
12255 E->getBeginLoc(), /*IsStringLocation=*/false,
12256 SpecRange, Hints);
12257 } else {
12258 // In this case, the expression could be printed using a different
12259 // specifier, but we've decided that the specifier is probably correct
12260 // and we should cast instead. Just use the normal warning message.
12261 EmitFormatDiagnostic(
12262 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12263 << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum
12264 << E->getSourceRange(),
12265 E->getBeginLoc(), /*IsStringLocation*/ false, SpecRange, Hints);
12266 }
12267 }
12268 } else {
12269 const CharSourceRange &CSR = getSpecifierRange(startSpecifier: StartSpecifier,
12270 specifierLen: SpecifierLen);
12271 // Since the warning for passing non-POD types to variadic functions
12272 // was deferred until now, we emit a warning for non-POD
12273 // arguments here.
12274 bool EmitTypeMismatch = false;
12275 switch (S.isValidVarArgType(Ty: ExprTy)) {
12276 case Sema::VAK_Valid:
12277 case Sema::VAK_ValidInCXX11: {
12278 unsigned Diag;
12279 switch (Match) {
12280 case ArgType::Match:
12281 case ArgType::MatchPromotion:
12282 case ArgType::NoMatchPromotionTypeConfusion:
12283 llvm_unreachable("expected non-matching");
12284 case ArgType::NoMatchPedantic:
12285 Diag = diag::warn_format_conversion_argument_type_mismatch_pedantic;
12286 break;
12287 case ArgType::NoMatchTypeConfusion:
12288 Diag = diag::warn_format_conversion_argument_type_mismatch_confusion;
12289 break;
12290 case ArgType::NoMatch:
12291 Diag = diag::warn_format_conversion_argument_type_mismatch;
12292 break;
12293 }
12294
12295 EmitFormatDiagnostic(
12296 S.PDiag(DiagID: Diag) << AT.getRepresentativeTypeName(C&: S.Context) << ExprTy
12297 << IsEnum << CSR << E->getSourceRange(),
12298 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12299 break;
12300 }
12301 case Sema::VAK_Undefined:
12302 case Sema::VAK_MSVCUndefined:
12303 if (CallType == Sema::VariadicDoesNotApply) {
12304 EmitTypeMismatch = true;
12305 } else {
12306 EmitFormatDiagnostic(
12307 S.PDiag(diag::warn_non_pod_vararg_with_format_string)
12308 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12309 << AT.getRepresentativeTypeName(S.Context) << CSR
12310 << E->getSourceRange(),
12311 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12312 checkForCStrMembers(AT, E);
12313 }
12314 break;
12315
12316 case Sema::VAK_Invalid:
12317 if (CallType == Sema::VariadicDoesNotApply)
12318 EmitTypeMismatch = true;
12319 else if (ExprTy->isObjCObjectType())
12320 EmitFormatDiagnostic(
12321 S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format)
12322 << S.getLangOpts().CPlusPlus11 << ExprTy << CallType
12323 << AT.getRepresentativeTypeName(S.Context) << CSR
12324 << E->getSourceRange(),
12325 E->getBeginLoc(), /*IsStringLocation*/ false, CSR);
12326 else
12327 // FIXME: If this is an initializer list, suggest removing the braces
12328 // or inserting a cast to the target type.
12329 S.Diag(E->getBeginLoc(), diag::err_cannot_pass_to_vararg_format)
12330 << isa<InitListExpr>(E) << ExprTy << CallType
12331 << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange();
12332 break;
12333 }
12334
12335 if (EmitTypeMismatch) {
12336 // The function is not variadic, so we do not generate warnings about
12337 // being allowed to pass that object as a variadic argument. Instead,
12338 // since there are inherently no printf specifiers for types which cannot
12339 // be passed as variadic arguments, emit a plain old specifier mismatch
12340    // diagnostic.
12341 EmitFormatDiagnostic(
12342 S.PDiag(diag::warn_format_conversion_argument_type_mismatch)
12343 << AT.getRepresentativeTypeName(S.Context) << ExprTy << false
12344 << E->getSourceRange(),
12345 E->getBeginLoc(), false, CSR);
12346 }
12347
12348 assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() &&
12349 "format string specifier index out of range");
12350 CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true;
12351 }
12352
12353 return true;
12354}
12355
12356//===--- CHECK: Scanf format string checking ------------------------------===//
12357
12358namespace {
12359
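// Handler for checking scanf-style format strings; the scanf counterpart of
// CheckPrintfHandler.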
12360class CheckScanfHandler : public CheckFormatHandler {
12361public:
12362 CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
12363 const Expr *origFormatExpr, Sema::FormatStringType type,
12364 unsigned firstDataArg, unsigned numDataArgs,
12365 const char *beg, Sema::FormatArgumentPassingKind APK,
12366 ArrayRef<const Expr *> Args, unsigned formatIdx,
12367 bool inFunctionCall, Sema::VariadicCallType CallType,
12368 llvm::SmallBitVector &CheckedVarArgs,
12369 UncoveredArgHandler &UncoveredArg)
12370 : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
12371 numDataArgs, beg, APK, Args, formatIdx,
12372 inFunctionCall, CallType, CheckedVarArgs,
12373 UncoveredArg) {}
12374
12375 bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
12376 const char *startSpecifier,
12377 unsigned specifierLen) override;
12378
12379 bool HandleInvalidScanfConversionSpecifier(
12380 const analyze_scanf::ScanfSpecifier &FS,
12381 const char *startSpecifier,
12382 unsigned specifierLen) override;
12383
12384 void HandleIncompleteScanList(const char *start, const char *end) override;
12385};
12386
12387} // namespace
12388
12389void CheckScanfHandler::HandleIncompleteScanList(const char *start,
12390 const char *end) {
12391 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
12392 getLocationOfByte(end), /*IsStringLocation*/true,
12393 getSpecifierRange(start, end - start));
12394}
12395
12396bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
12397 const analyze_scanf::ScanfSpecifier &FS,
12398 const char *startSpecifier,
12399 unsigned specifierLen) {
12400 const analyze_scanf::ScanfConversionSpecifier &CS =
12401 FS.getConversionSpecifier();
12402
12403 return HandleInvalidConversionSpecifier(argIndex: FS.getArgIndex(),
12404 Loc: getLocationOfByte(x: CS.getStart()),
12405 startSpec: startSpecifier, specifierLen,
12406 csStart: CS.getStart(), csLen: CS.getLength());
12407}
12408
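// Check a single scanf conversion specifier: positional-argument consistency,
// zero field widths, length-modifier validity, and that the matching
// argument's type is compatible (with a fix-it where possible).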
12409bool CheckScanfHandler::HandleScanfSpecifier(
12410 const analyze_scanf::ScanfSpecifier &FS,
12411 const char *startSpecifier,
12412 unsigned specifierLen) {
12413 using namespace analyze_scanf;
12414 using namespace analyze_format_string;
12415
12416 const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
12417
12418 // Handle case where '%' and '*' don't consume an argument. These shouldn't
12419 // be used to decide if we are using positional arguments consistently.
12420 if (FS.consumesDataArgument()) {
12421 if (atFirstArg) {
12422 atFirstArg = false;
12423 usesPositionalArgs = FS.usesPositionalArg();
12424 }
12425 else if (usesPositionalArgs != FS.usesPositionalArg()) {
12426 HandlePositionalNonpositionalArgs(Loc: getLocationOfByte(x: CS.getStart()),
12427 startSpec: startSpecifier, specifierLen);
12428 return false;
12429 }
12430 }
12431
12432  // Check that a constant field width, if specified, is non-zero.
12433 const OptionalAmount &Amt = FS.getFieldWidth();
12434 if (Amt.getHowSpecified() == OptionalAmount::Constant) {
12435 if (Amt.getConstantAmount() == 0) {
12436 const CharSourceRange &R = getSpecifierRange(startSpecifier: Amt.getStart(),
12437 specifierLen: Amt.getConstantLength());
12438 EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
12439 getLocationOfByte(Amt.getStart()),
12440 /*IsStringLocation*/true, R,
12441 FixItHint::CreateRemoval(R));
12442 }
12443 }
12444
12445 if (!FS.consumesDataArgument()) {
12446 // FIXME: Technically specifying a precision or field width here
12447 // makes no sense. Worth issuing a warning at some point.
12448 return true;
12449 }
12450
12451 // Consume the argument.
12452 unsigned argIndex = FS.getArgIndex();
12453 if (argIndex < NumDataArgs) {
12454 // The check to see if the argIndex is valid will come later.
12455 // We set the bit here because we may exit early from this
12456 // function if we encounter some other error.
12457 CoveredArgs.set(argIndex);
12458 }
12459
12460  // Check that the length modifier is valid with the given conversion specifier.
12461 if (!FS.hasValidLengthModifier(Target: S.getASTContext().getTargetInfo(),
12462 LO: S.getLangOpts()))
12463 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
12464 diag::warn_format_nonsensical_length);
12465 else if (!FS.hasStandardLengthModifier())
12466 HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen);
12467 else if (!FS.hasStandardLengthConversionCombination())
12468 HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen,
12469 diag::warn_format_non_standard_conversion_spec);
12470
12471 if (!FS.hasStandardConversionSpecifier(LangOpt: S.getLangOpts()))
12472 HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
12473
12474 // The remaining checks depend on the data arguments.
12475 if (ArgPassingKind == Sema::FAPK_VAList)
12476 return true;
12477
12478 if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
12479 return false;
12480
12481 // Check that the argument type matches the format specifier.
12482 const Expr *Ex = getDataArg(i: argIndex);
12483 if (!Ex)
12484 return true;
12485
12486 const analyze_format_string::ArgType &AT = FS.getArgType(Ctx&: S.Context);
12487
12488 if (!AT.isValid()) {
12489 return true;
12490 }
12491
12492 analyze_format_string::ArgType::MatchKind Match =
12493 AT.matchesType(C&: S.Context, argTy: Ex->getType());
12494 bool Pedantic = Match == analyze_format_string::ArgType::NoMatchPedantic;
12495 if (Match == analyze_format_string::ArgType::Match)
12496 return true;
12497
12498 ScanfSpecifier fixedFS = FS;
12499 bool Success = fixedFS.fixType(QT: Ex->getType(), RawQT: Ex->IgnoreImpCasts()->getType(),
12500 LangOpt: S.getLangOpts(), Ctx&: S.Context);
12501
12502 unsigned Diag =
12503 Pedantic ? diag::warn_format_conversion_argument_type_mismatch_pedantic
12504 : diag::warn_format_conversion_argument_type_mismatch;
12505
12506 if (Success) {
12507 // Get the fix string from the fixed format specifier.
12508 SmallString<128> buf;
12509 llvm::raw_svector_ostream os(buf);
12510 fixedFS.toString(os);
12511
12512 EmitFormatDiagnostic(
12513 S.PDiag(DiagID: Diag) << AT.getRepresentativeTypeName(C&: S.Context)
12514 << Ex->getType() << false << Ex->getSourceRange(),
12515 Ex->getBeginLoc(),
12516 /*IsStringLocation*/ false,
12517 getSpecifierRange(startSpecifier, specifierLen),
12518 FixItHint::CreateReplacement(
12519 RemoveRange: getSpecifierRange(startSpecifier, specifierLen), Code: os.str()));
12520 } else {
12521 EmitFormatDiagnostic(S.PDiag(DiagID: Diag)
12522 << AT.getRepresentativeTypeName(C&: S.Context)
12523 << Ex->getType() << false << Ex->getSourceRange(),
12524 Ex->getBeginLoc(),
12525 /*IsStringLocation*/ false,
12526 getSpecifierRange(startSpecifier, specifierLen));
12527 }
12528
12529 return true;
12530}
12531
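// Check a single format string literal against the call's arguments. Diagnoses
// wide, truncated (non-null-terminated), and empty format strings up front,
// then dispatches to the printf or scanf handler to check each specifier.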
12532static void CheckFormatString(
12533 Sema &S, const FormatStringLiteral *FExpr, const Expr *OrigFormatExpr,
12534 ArrayRef<const Expr *> Args, Sema::FormatArgumentPassingKind APK,
12535 unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type,
12536 bool inFunctionCall, Sema::VariadicCallType CallType,
12537 llvm::SmallBitVector &CheckedVarArgs, UncoveredArgHandler &UncoveredArg,
12538 bool IgnoreStringsWithoutSpecifiers) {
12539 // CHECK: is the format string a wide literal?
12540 if (!FExpr->isAscii() && !FExpr->isUTF8()) {
12541 CheckFormatHandler::EmitFormatDiagnostic(
12542 S, inFunctionCall, Args[format_idx],
12543 S.PDiag(diag::warn_format_string_is_wide_literal), FExpr->getBeginLoc(),
12544 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
12545 return;
12546 }
12547
12548 // Str - The format string. NOTE: this is NOT null-terminated!
12549 StringRef StrRef = FExpr->getString();
12550 const char *Str = StrRef.data();
12551 // Account for cases where the string literal is truncated in a declaration.
12552 const ConstantArrayType *T =
12553 S.Context.getAsConstantArrayType(T: FExpr->getType());
12554 assert(T && "String literal not of constant array type!");
12555 size_t TypeSize = T->getSize().getZExtValue();
12556 size_t StrLen = std::min(a: std::max(a: TypeSize, b: size_t(1)) - 1, b: StrRef.size());
12557 const unsigned numDataArgs = Args.size() - firstDataArg;
12558
12559 if (IgnoreStringsWithoutSpecifiers &&
12560 !analyze_format_string::parseFormatStringHasFormattingSpecifiers(
12561 Begin: Str, End: Str + StrLen, LO: S.getLangOpts(), Target: S.Context.getTargetInfo()))
12562 return;
12563
12564 // Emit a warning if the string literal is truncated and does not contain an
12565 // embedded null character.
12566 if (TypeSize <= StrRef.size() && !StrRef.substr(Start: 0, N: TypeSize).contains(C: '\0')) {
12567 CheckFormatHandler::EmitFormatDiagnostic(
12568 S, inFunctionCall, Args[format_idx],
12569 S.PDiag(diag::warn_printf_format_string_not_null_terminated),
12570 FExpr->getBeginLoc(),
12571 /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange());
12572 return;
12573 }
12574
12575 // CHECK: empty format string?
12576 if (StrLen == 0 && numDataArgs > 0) {
12577 CheckFormatHandler::EmitFormatDiagnostic(
12578 S, inFunctionCall, Args[format_idx],
12579 S.PDiag(diag::warn_empty_format_string), FExpr->getBeginLoc(),
12580 /*IsStringLocation*/ true, OrigFormatExpr->getSourceRange());
12581 return;
12582 }
12583
12584 if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
12585 Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
12586 Type == Sema::FST_OSTrace) {
12587 CheckPrintfHandler H(
12588 S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
12589 (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str, APK,
12590 Args, format_idx, inFunctionCall, CallType, CheckedVarArgs,
12591 UncoveredArg);
12592
12593 if (!analyze_format_string::ParsePrintfString(
12594 H, beg: Str, end: Str + StrLen, LO: S.getLangOpts(), Target: S.Context.getTargetInfo(),
12595 isFreeBSDKPrintf: Type == Sema::FST_FreeBSDKPrintf))
12596 H.DoneProcessing();
12597 } else if (Type == Sema::FST_Scanf) {
12598 CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
12599 numDataArgs, Str, APK, Args, format_idx, inFunctionCall,
12600 CallType, CheckedVarArgs, UncoveredArg);
12601
12602 if (!analyze_format_string::ParseScanfString(
12603 H, beg: Str, end: Str + StrLen, LO: S.getLangOpts(), Target: S.Context.getTargetInfo()))
12604 H.DoneProcessing();
12605 } // TODO: handle other formats
12606}
12607
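// Returns true if the format string literal contains an 's' conversion
// specifier (e.g. "%s").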
12608bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) {
12609 // Str - The format string. NOTE: this is NOT null-terminated!
12610 StringRef StrRef = FExpr->getString();
12611 const char *Str = StrRef.data();
12612 // Account for cases where the string literal is truncated in a declaration.
12613 const ConstantArrayType *T = Context.getAsConstantArrayType(T: FExpr->getType());
12614 assert(T && "String literal not of constant array type!");
12615 size_t TypeSize = T->getSize().getZExtValue();
12616 size_t StrLen = std::min(a: std::max(a: TypeSize, b: size_t(1)) - 1, b: StrRef.size());
12617 return analyze_format_string::ParseFormatStringHasSArg(beg: Str, end: Str + StrLen,
12618 LO: getLangOpts(),
12619 Target: Context.getTargetInfo());
12620}
12621
12622//===--- CHECK: Warn on use of wrong absolute value function. -------------===//
12623
12624// Returns the related absolute value function that operates on a larger type,
12625// or 0 if one does not exist.
12626static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) {
12627 switch (AbsFunction) {
12628 default:
12629 return 0;
12630
12631 case Builtin::BI__builtin_abs:
12632 return Builtin::BI__builtin_labs;
12633 case Builtin::BI__builtin_labs:
12634 return Builtin::BI__builtin_llabs;
12635 case Builtin::BI__builtin_llabs:
12636 return 0;
12637
12638 case Builtin::BI__builtin_fabsf:
12639 return Builtin::BI__builtin_fabs;
12640 case Builtin::BI__builtin_fabs:
12641 return Builtin::BI__builtin_fabsl;
12642 case Builtin::BI__builtin_fabsl:
12643 return 0;
12644
12645 case Builtin::BI__builtin_cabsf:
12646 return Builtin::BI__builtin_cabs;
12647 case Builtin::BI__builtin_cabs:
12648 return Builtin::BI__builtin_cabsl;
12649 case Builtin::BI__builtin_cabsl:
12650 return 0;
12651
12652 case Builtin::BIabs:
12653 return Builtin::BIlabs;
12654 case Builtin::BIlabs:
12655 return Builtin::BIllabs;
12656 case Builtin::BIllabs:
12657 return 0;
12658
12659 case Builtin::BIfabsf:
12660 return Builtin::BIfabs;
12661 case Builtin::BIfabs:
12662 return Builtin::BIfabsl;
12663 case Builtin::BIfabsl:
12664 return 0;
12665
12666 case Builtin::BIcabsf:
12667 return Builtin::BIcabs;
12668 case Builtin::BIcabs:
12669 return Builtin::BIcabsl;
12670 case Builtin::BIcabsl:
12671 return 0;
12672 }
12673}
12674
12675// Returns the argument type of the absolute value function.
12676static QualType getAbsoluteValueArgumentType(ASTContext &Context,
12677 unsigned AbsType) {
12678 if (AbsType == 0)
12679 return QualType();
12680
12681 ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
12682 QualType BuiltinType = Context.GetBuiltinType(ID: AbsType, Error);
12683 if (Error != ASTContext::GE_None)
12684 return QualType();
12685
12686 const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>();
12687 if (!FT)
12688 return QualType();
12689
12690 if (FT->getNumParams() != 1)
12691 return QualType();
12692
12693 return FT->getParamType(i: 0);
12694}
12695
12696// Returns the best absolute value function for the given argument type,
12697// starting from the current absolute value function, or zero if none fits.
12698static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType,
12699 unsigned AbsFunctionKind) {
12700 unsigned BestKind = 0;
12701 uint64_t ArgSize = Context.getTypeSize(T: ArgType);
12702 for (unsigned Kind = AbsFunctionKind; Kind != 0;
12703 Kind = getLargerAbsoluteValueFunction(AbsFunction: Kind)) {
12704 QualType ParamType = getAbsoluteValueArgumentType(Context, AbsType: Kind);
12705 if (Context.getTypeSize(T: ParamType) >= ArgSize) {
12706 if (BestKind == 0)
12707 BestKind = Kind;
12708 else if (Context.hasSameType(T1: ParamType, T2: ArgType)) {
12709 BestKind = Kind;
12710 break;
12711 }
12712 }
12713 }
12714 return BestKind;
12715}
12716
12717enum AbsoluteValueKind {
12718 AVK_Integer,
12719 AVK_Floating,
12720 AVK_Complex
12721};
12722
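// Classifies a type as integer, floating-point, or complex so it can be
// matched with the corresponding family of absolute value functions.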
12723static AbsoluteValueKind getAbsoluteValueKind(QualType T) {
12724 if (T->isIntegralOrEnumerationType())
12725 return AVK_Integer;
12726 if (T->isRealFloatingType())
12727 return AVK_Floating;
12728 if (T->isAnyComplexType())
12729 return AVK_Complex;
12730
12731 llvm_unreachable("Type not integer, floating, or complex");
12732}
12733
12734// Changes the absolute value function to one handling the given value kind
12735// (integer, floating, or complex). Preserves whether the function is a builtin.
12736static unsigned changeAbsFunction(unsigned AbsKind,
12737 AbsoluteValueKind ValueKind) {
12738 switch (ValueKind) {
12739 case AVK_Integer:
12740 switch (AbsKind) {
12741 default:
12742 return 0;
12743 case Builtin::BI__builtin_fabsf:
12744 case Builtin::BI__builtin_fabs:
12745 case Builtin::BI__builtin_fabsl:
12746 case Builtin::BI__builtin_cabsf:
12747 case Builtin::BI__builtin_cabs:
12748 case Builtin::BI__builtin_cabsl:
12749 return Builtin::BI__builtin_abs;
12750 case Builtin::BIfabsf:
12751 case Builtin::BIfabs:
12752 case Builtin::BIfabsl:
12753 case Builtin::BIcabsf:
12754 case Builtin::BIcabs:
12755 case Builtin::BIcabsl:
12756 return Builtin::BIabs;
12757 }
12758 case AVK_Floating:
12759 switch (AbsKind) {
12760 default:
12761 return 0;
12762 case Builtin::BI__builtin_abs:
12763 case Builtin::BI__builtin_labs:
12764 case Builtin::BI__builtin_llabs:
12765 case Builtin::BI__builtin_cabsf:
12766 case Builtin::BI__builtin_cabs:
12767 case Builtin::BI__builtin_cabsl:
12768 return Builtin::BI__builtin_fabsf;
12769 case Builtin::BIabs:
12770 case Builtin::BIlabs:
12771 case Builtin::BIllabs:
12772 case Builtin::BIcabsf:
12773 case Builtin::BIcabs:
12774 case Builtin::BIcabsl:
12775 return Builtin::BIfabsf;
12776 }
12777 case AVK_Complex:
12778 switch (AbsKind) {
12779 default:
12780 return 0;
12781 case Builtin::BI__builtin_abs:
12782 case Builtin::BI__builtin_labs:
12783 case Builtin::BI__builtin_llabs:
12784 case Builtin::BI__builtin_fabsf:
12785 case Builtin::BI__builtin_fabs:
12786 case Builtin::BI__builtin_fabsl:
12787 return Builtin::BI__builtin_cabsf;
12788 case Builtin::BIabs:
12789 case Builtin::BIlabs:
12790 case Builtin::BIllabs:
12791 case Builtin::BIfabsf:
12792 case Builtin::BIfabs:
12793 case Builtin::BIfabsl:
12794 return Builtin::BIcabsf;
12795 }
12796 }
12797 llvm_unreachable("Unable to convert function");
12798}
12799
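// Returns the builtin ID of \p FDecl if it is one of the known absolute value
// functions handled here, or 0 otherwise.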
12800static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) {
12801 const IdentifierInfo *FnInfo = FDecl->getIdentifier();
12802 if (!FnInfo)
12803 return 0;
12804
12805 switch (FDecl->getBuiltinID()) {
12806 default:
12807 return 0;
12808 case Builtin::BI__builtin_abs:
12809 case Builtin::BI__builtin_fabs:
12810 case Builtin::BI__builtin_fabsf:
12811 case Builtin::BI__builtin_fabsl:
12812 case Builtin::BI__builtin_labs:
12813 case Builtin::BI__builtin_llabs:
12814 case Builtin::BI__builtin_cabs:
12815 case Builtin::BI__builtin_cabsf:
12816 case Builtin::BI__builtin_cabsl:
12817 case Builtin::BIabs:
12818 case Builtin::BIlabs:
12819 case Builtin::BIllabs:
12820 case Builtin::BIfabs:
12821 case Builtin::BIfabsf:
12822 case Builtin::BIfabsl:
12823 case Builtin::BIcabs:
12824 case Builtin::BIcabsf:
12825 case Builtin::BIcabsl:
12826 return FDecl->getBuiltinID();
12827 }
12828 llvm_unreachable("Unknown Builtin type");
12829}
12830
12831// If the replacement is valid, emit a note with the replacement function.
12832// Additionally, suggest including the proper header if not already included.
12833static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
12834 unsigned AbsKind, QualType ArgType) {
12835 bool EmitHeaderHint = true;
12836 const char *HeaderName = nullptr;
12837 StringRef FunctionName;
12838 if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) {
12839 FunctionName = "std::abs";
12840 if (ArgType->isIntegralOrEnumerationType()) {
12841 HeaderName = "cstdlib";
12842 } else if (ArgType->isRealFloatingType()) {
12843 HeaderName = "cmath";
12844 } else {
12845 llvm_unreachable("Invalid Type");
12846 }
12847
12848 // Lookup all std::abs
12849 if (NamespaceDecl *Std = S.getStdNamespace()) {
12850 LookupResult R(S, &S.Context.Idents.get(Name: "abs"), Loc, Sema::LookupAnyName);
12851 R.suppressDiagnostics();
12852 S.LookupQualifiedName(R, Std);
12853
12854 for (const auto *I : R) {
12855 const FunctionDecl *FDecl = nullptr;
12856 if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(Val: I)) {
12857 FDecl = dyn_cast<FunctionDecl>(Val: UsingD->getTargetDecl());
12858 } else {
12859 FDecl = dyn_cast<FunctionDecl>(Val: I);
12860 }
12861 if (!FDecl)
12862 continue;
12863
12864        // Found a std::abs() overload; check that it is the right one.
12865 if (FDecl->getNumParams() != 1)
12866 continue;
12867
12868 // Check that the parameter type can handle the argument.
12869 QualType ParamType = FDecl->getParamDecl(i: 0)->getType();
12870 if (getAbsoluteValueKind(T: ArgType) == getAbsoluteValueKind(T: ParamType) &&
12871 S.Context.getTypeSize(T: ArgType) <=
12872 S.Context.getTypeSize(T: ParamType)) {
12873 // Found a function, don't need the header hint.
12874 EmitHeaderHint = false;
12875 break;
12876 }
12877 }
12878 }
12879 } else {
12880 FunctionName = S.Context.BuiltinInfo.getName(ID: AbsKind);
12881 HeaderName = S.Context.BuiltinInfo.getHeaderName(ID: AbsKind);
12882
12883 if (HeaderName) {
12884 DeclarationName DN(&S.Context.Idents.get(Name: FunctionName));
12885 LookupResult R(S, DN, Loc, Sema::LookupAnyName);
12886 R.suppressDiagnostics();
12887 S.LookupName(R, S: S.getCurScope());
12888
12889 if (R.isSingleResult()) {
12890 FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: R.getFoundDecl());
12891 if (FD && FD->getBuiltinID() == AbsKind) {
12892 EmitHeaderHint = false;
12893 } else {
12894 return;
12895 }
12896 } else if (!R.empty()) {
12897 return;
12898 }
12899 }
12900 }
12901
12902 S.Diag(Loc, diag::note_replace_abs_function)
12903 << FunctionName << FixItHint::CreateReplacement(Range, FunctionName);
12904
12905 if (!HeaderName)
12906 return;
12907
12908 if (!EmitHeaderHint)
12909 return;
12910
12911 S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName
12912 << FunctionName;
12913}
12914
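// Returns true if \p FDecl is a function named \p Str declared in namespace
// std.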
12915template <std::size_t StrLen>
12916static bool IsStdFunction(const FunctionDecl *FDecl,
12917 const char (&Str)[StrLen]) {
12918 if (!FDecl)
12919 return false;
12920 if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
12921 return false;
12922 if (!FDecl->isInStdNamespace())
12923 return false;
12924
12925 return true;
12926}
12927
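// Warn when NaN- or infinity-related functions (std::isnan, std::isinf, etc.)
// are called while the floating-point options in effect say that NaNs or
// infinities will not be honored (e.g. under fast-math style options).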
12928void Sema::CheckInfNaNFunction(const CallExpr *Call,
12929 const FunctionDecl *FDecl) {
12930 FPOptions FPO = Call->getFPFeaturesInEffect(LO: getLangOpts());
12931 if ((IsStdFunction(FDecl, "isnan") || IsStdFunction(FDecl, "isunordered") ||
12932 (Call->getBuiltinCallee() == Builtin::BI__builtin_nanf)) &&
12933 FPO.getNoHonorNaNs())
12934 Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
12935 << 1 << 0 << Call->getSourceRange();
12936 else if ((IsStdFunction(FDecl, "isinf") ||
12937 (IsStdFunction(FDecl, "isfinite") ||
12938 (FDecl->getIdentifier() && FDecl->getName() == "infinity"))) &&
12939 FPO.getNoHonorInfs())
12940 Diag(Call->getBeginLoc(), diag::warn_fp_nan_inf_when_disabled)
12941 << 0 << 0 << Call->getSourceRange();
12942}
12943
12944// Warn when using the wrong abs() function.
12945void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
12946 const FunctionDecl *FDecl) {
12947 if (Call->getNumArgs() != 1)
12948 return;
12949
12950 unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
12951 bool IsStdAbs = IsStdFunction(FDecl, Str: "abs");
12952 if (AbsKind == 0 && !IsStdAbs)
12953 return;
12954
12955 QualType ArgType = Call->getArg(Arg: 0)->IgnoreParenImpCasts()->getType();
12956 QualType ParamType = Call->getArg(Arg: 0)->getType();
12957
12958 // Unsigned types cannot be negative. Suggest removing the absolute value
12959 // function call.
12960 if (ArgType->isUnsignedIntegerType()) {
12961 StringRef FunctionName =
12962 IsStdAbs ? "std::abs" : Context.BuiltinInfo.getName(ID: AbsKind);
12963 Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType;
12964 Diag(Call->getExprLoc(), diag::note_remove_abs)
12965 << FunctionName
12966 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange());
12967 return;
12968 }
12969
12970  // Taking the absolute value of a pointer is very suspicious; the caller
12971  // probably wanted to index an array, dereference a pointer, call a function, etc.
12972 if (ArgType->isPointerType() || ArgType->canDecayToPointerType()) {
12973 unsigned DiagType = 0;
12974 if (ArgType->isFunctionType())
12975 DiagType = 1;
12976 else if (ArgType->isArrayType())
12977 DiagType = 2;
12978
12979 Diag(Call->getExprLoc(), diag::warn_pointer_abs) << DiagType << ArgType;
12980 return;
12981 }
12982
12983 // std::abs has overloads which prevent most of the absolute value problems
12984 // from occurring.
12985 if (IsStdAbs)
12986 return;
12987
12988 AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(T: ArgType);
12989 AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(T: ParamType);
12990
12991 // The argument and parameter are the same kind. Check if they are the right
12992 // size.
12993 if (ArgValueKind == ParamValueKind) {
12994 if (Context.getTypeSize(T: ArgType) <= Context.getTypeSize(T: ParamType))
12995 return;
12996
12997 unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsFunctionKind: AbsKind);
12998 Diag(Call->getExprLoc(), diag::warn_abs_too_small)
12999 << FDecl << ArgType << ParamType;
13000
13001 if (NewAbsKind == 0)
13002 return;
13003
13004 emitReplacement(*this, Call->getExprLoc(),
13005 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
13006 return;
13007 }
13008
13009 // ArgValueKind != ParamValueKind
13010 // The wrong type of absolute value function was used. Attempt to find the
13011 // proper one.
13012 unsigned NewAbsKind = changeAbsFunction(AbsKind, ValueKind: ArgValueKind);
13013 NewAbsKind = getBestAbsFunction(Context, ArgType, AbsFunctionKind: NewAbsKind);
13014 if (NewAbsKind == 0)
13015 return;
13016
13017 Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type)
13018 << FDecl << ParamValueKind << ArgValueKind;
13019
13020 emitReplacement(*this, Call->getExprLoc(),
13021 Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
13022}
13023
13024//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
13025void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
13026 const FunctionDecl *FDecl) {
13027 if (!Call || !FDecl) return;
13028
13029 // Ignore template specializations and macros.
13030 if (inTemplateInstantiation()) return;
13031 if (Call->getExprLoc().isMacroID()) return;
13032
13033  // Only care about the one-template-argument, two-parameter form of std::max.
13034 if (Call->getNumArgs() != 2) return;
13035 if (!IsStdFunction(FDecl, Str: "max")) return;
13036 const auto * ArgList = FDecl->getTemplateSpecializationArgs();
13037 if (!ArgList) return;
13038 if (ArgList->size() != 1) return;
13039
13040 // Check that template type argument is unsigned integer.
13041 const auto& TA = ArgList->get(Idx: 0);
13042 if (TA.getKind() != TemplateArgument::Type) return;
13043 QualType ArgType = TA.getAsType();
13044 if (!ArgType->isUnsignedIntegerType()) return;
13045
13046 // See if either argument is a literal zero.
13047 auto IsLiteralZeroArg = [](const Expr* E) -> bool {
13048 const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Val: E);
13049 if (!MTE) return false;
13050 const auto *Num = dyn_cast<IntegerLiteral>(Val: MTE->getSubExpr());
13051 if (!Num) return false;
13052 if (Num->getValue() != 0) return false;
13053 return true;
13054 };
13055
13056 const Expr *FirstArg = Call->getArg(Arg: 0);
13057 const Expr *SecondArg = Call->getArg(Arg: 1);
13058 const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
13059 const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
13060
13061 // Only warn when exactly one argument is zero.
13062 if (IsFirstArgZero == IsSecondArgZero) return;
13063
13064 SourceRange FirstRange = FirstArg->getSourceRange();
13065 SourceRange SecondRange = SecondArg->getSourceRange();
13066
13067 SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
13068
13069 Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
13070 << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
13071
13072 // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
13073 SourceRange RemovalRange;
13074 if (IsFirstArgZero) {
13075 RemovalRange = SourceRange(FirstRange.getBegin(),
13076 SecondRange.getBegin().getLocWithOffset(Offset: -1));
13077 } else {
13078 RemovalRange = SourceRange(getLocForEndOfToken(Loc: FirstRange.getEnd()),
13079 SecondRange.getEnd());
13080 }
13081
13082 Diag(Call->getExprLoc(), diag::note_remove_max_call)
13083 << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
13084 << FixItHint::CreateRemoval(RemovalRange);
13085}
13086
13087//===--- CHECK: Standard memory functions ---------------------------------===//
13088
13089/// Takes the expression passed to the size_t parameter of functions
13090/// such as memcmp, strncat, etc., and warns if it's a comparison.
13091///
13092/// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`.
13093static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E,
13094 IdentifierInfo *FnName,
13095 SourceLocation FnLoc,
13096 SourceLocation RParenLoc) {
13097 const BinaryOperator *Size = dyn_cast<BinaryOperator>(Val: E);
13098 if (!Size)
13099 return false;
13100
13101 // if E is binop and op is <=>, >, <, >=, <=, ==, &&, ||:
13102 if (!Size->isComparisonOp() && !Size->isLogicalOp())
13103 return false;
13104
13105 SourceRange SizeRange = Size->getSourceRange();
13106 S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison)
13107 << SizeRange << FnName;
13108 S.Diag(FnLoc, diag::note_memsize_comparison_paren)
13109 << FnName
13110 << FixItHint::CreateInsertion(
13111 S.getLocForEndOfToken(Size->getLHS()->getEndLoc()), ")")
13112 << FixItHint::CreateRemoval(RParenLoc);
13113 S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence)
13114 << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(")
13115 << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()),
13116 ")");
13117
13118 return true;
13119}
13120
13121/// Determine whether the given type is or contains a dynamic class type
13122/// (e.g., whether it has a vtable).
13123static const CXXRecordDecl *getContainedDynamicClass(QualType T,
13124 bool &IsContained) {
13125 // Look through array types while ignoring qualifiers.
13126 const Type *Ty = T->getBaseElementTypeUnsafe();
13127 IsContained = false;
13128
13129 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
13130 RD = RD ? RD->getDefinition() : nullptr;
13131 if (!RD || RD->isInvalidDecl())
13132 return nullptr;
13133
13134 if (RD->isDynamicClass())
13135 return RD;
13136
13137 // Check all the fields. If any bases were dynamic, the class is dynamic.
13138 // It's impossible for a class to transitively contain itself by value, so
13139 // infinite recursion is impossible.
13140 for (auto *FD : RD->fields()) {
13141 bool SubContained;
13142 if (const CXXRecordDecl *ContainedRD =
13143 getContainedDynamicClass(FD->getType(), SubContained)) {
13144 IsContained = true;
13145 return ContainedRD;
13146 }
13147 }
13148
13149 return nullptr;
13150}
13151
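// Returns \p E as a UnaryExprOrTypeTraitExpr if it is a sizeof expression,
// otherwise returns null.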
13152static const UnaryExprOrTypeTraitExpr *getAsSizeOfExpr(const Expr *E) {
13153 if (const auto *Unary = dyn_cast<UnaryExprOrTypeTraitExpr>(Val: E))
13154 if (Unary->getKind() == UETT_SizeOf)
13155 return Unary;
13156 return nullptr;
13157}
13158
13159/// If E is a sizeof expression, returns its argument expression,
13160/// otherwise returns NULL.
13161static const Expr *getSizeOfExprArg(const Expr *E) {
13162 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
13163 if (!SizeOf->isArgumentType())
13164 return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
13165 return nullptr;
13166}
13167
13168/// If E is a sizeof expression, returns its argument type.
13169static QualType getSizeOfArgType(const Expr *E) {
13170 if (const UnaryExprOrTypeTraitExpr *SizeOf = getAsSizeOfExpr(E))
13171 return SizeOf->getTypeOfArgument();
13172 return QualType();
13173}
13174
13175namespace {
13176
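// Visits the fields of a struct and emits a note for each field that makes the
// struct non-trivial to default-initialize (e.g. ARC __strong or __weak
// pointers), to explain the preceding memory-access warning.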
13177struct SearchNonTrivialToInitializeField
13178 : DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField> {
13179 using Super =
13180 DefaultInitializedTypeVisitor<SearchNonTrivialToInitializeField>;
13181
13182 SearchNonTrivialToInitializeField(const Expr *E, Sema &S) : E(E), S(S) {}
13183
13184 void visitWithKind(QualType::PrimitiveDefaultInitializeKind PDIK, QualType FT,
13185 SourceLocation SL) {
13186 if (const auto *AT = asDerived().getContext().getAsArrayType(T: FT)) {
13187 asDerived().visitArray(PDIK, AT, SL);
13188 return;
13189 }
13190
13191 Super::visitWithKind(PDIK, FT, Args&: SL);
13192 }
13193
13194 void visitARCStrong(QualType FT, SourceLocation SL) {
13195 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
13196 }
13197 void visitARCWeak(QualType FT, SourceLocation SL) {
13198 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 1);
13199 }
13200 void visitStruct(QualType FT, SourceLocation SL) {
13201 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
13202 visit(FD->getType(), FD->getLocation());
13203 }
13204 void visitArray(QualType::PrimitiveDefaultInitializeKind PDIK,
13205 const ArrayType *AT, SourceLocation SL) {
13206 visit(FT: getContext().getBaseElementType(VAT: AT), Args&: SL);
13207 }
13208 void visitTrivial(QualType FT, SourceLocation SL) {}
13209
13210 static void diag(QualType RT, const Expr *E, Sema &S) {
13211 SearchNonTrivialToInitializeField(E, S).visitStruct(FT: RT, SL: SourceLocation());
13212 }
13213
13214 ASTContext &getContext() { return S.getASTContext(); }
13215
13216 const Expr *E;
13217 Sema &S;
13218};
13219
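// Visits the fields of a struct and emits a note for each field that makes the
// struct non-trivial to copy (e.g. ARC __strong or __weak pointers).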
13220struct SearchNonTrivialToCopyField
13221 : CopiedTypeVisitor<SearchNonTrivialToCopyField, false> {
13222 using Super = CopiedTypeVisitor<SearchNonTrivialToCopyField, false>;
13223
13224 SearchNonTrivialToCopyField(const Expr *E, Sema &S) : E(E), S(S) {}
13225
13226 void visitWithKind(QualType::PrimitiveCopyKind PCK, QualType FT,
13227 SourceLocation SL) {
13228 if (const auto *AT = asDerived().getContext().getAsArrayType(T: FT)) {
13229 asDerived().visitArray(PCK, AT, SL);
13230 return;
13231 }
13232
13233 Super::visitWithKind(PCK, FT, Args&: SL);
13234 }
13235
13236 void visitARCStrong(QualType FT, SourceLocation SL) {
13237 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
13238 }
13239 void visitARCWeak(QualType FT, SourceLocation SL) {
13240 S.DiagRuntimeBehavior(SL, E, S.PDiag(diag::note_nontrivial_field) << 0);
13241 }
13242 void visitStruct(QualType FT, SourceLocation SL) {
13243 for (const FieldDecl *FD : FT->castAs<RecordType>()->getDecl()->fields())
13244 visit(FD->getType(), FD->getLocation());
13245 }
13246 void visitArray(QualType::PrimitiveCopyKind PCK, const ArrayType *AT,
13247 SourceLocation SL) {
13248 visit(FT: getContext().getBaseElementType(VAT: AT), Args&: SL);
13249 }
13250 void preVisit(QualType::PrimitiveCopyKind PCK, QualType FT,
13251 SourceLocation SL) {}
13252 void visitTrivial(QualType FT, SourceLocation SL) {}
13253 void visitVolatileTrivial(QualType FT, SourceLocation SL) {}
13254
13255 static void diag(QualType RT, const Expr *E, Sema &S) {
13256 SearchNonTrivialToCopyField(E, S).visitStruct(FT: RT, SL: SourceLocation());
13257 }
13258
13259 ASTContext &getContext() { return S.getASTContext(); }
13260
13261 const Expr *E;
13262 Sema &S;
13263};
13264
13265} // namespace
13266
13267/// Detect if \c SizeofExpr is likely to calculate the sizeof an object.
13268static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) {
13269 SizeofExpr = SizeofExpr->IgnoreParenImpCasts();
13270
13271 if (const auto *BO = dyn_cast<BinaryOperator>(Val: SizeofExpr)) {
13272 if (BO->getOpcode() != BO_Mul && BO->getOpcode() != BO_Add)
13273 return false;
13274
13275 return doesExprLikelyComputeSize(SizeofExpr: BO->getLHS()) ||
13276 doesExprLikelyComputeSize(SizeofExpr: BO->getRHS());
13277 }
13278
13279 return getAsSizeOfExpr(E: SizeofExpr) != nullptr;
13280}
13281
13282/// Check if the ArgLoc originated from a macro passed to the call at CallLoc.
13283///
13284/// \code
13285/// #define MACRO 0
13286/// foo(MACRO);
13287/// foo(0);
13288/// \endcode
13289///
13290/// This should return true for the first call to foo, but not for the second
13291/// (regardless of whether foo is a macro or function).
13292static bool isArgumentExpandedFromMacro(SourceManager &SM,
13293 SourceLocation CallLoc,
13294 SourceLocation ArgLoc) {
13295 if (!CallLoc.isMacroID())
13296 return SM.getFileID(SpellingLoc: CallLoc) != SM.getFileID(SpellingLoc: ArgLoc);
13297
13298 return SM.getFileID(SpellingLoc: SM.getImmediateMacroCallerLoc(Loc: CallLoc)) !=
13299 SM.getFileID(SpellingLoc: SM.getImmediateMacroCallerLoc(Loc: ArgLoc));
13300}
13301
13302/// Diagnose cases like 'memset(buf, sizeof(buf), 0)', which should have the
13303/// last two arguments transposed.
13304static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
13305 if (BId != Builtin::BImemset && BId != Builtin::BIbzero)
13306 return;
13307
13308 const Expr *SizeArg =
13309 Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
13310
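  // Returns true if the expression is an integer or character literal with
  // value zero.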
13311 auto isLiteralZero = [](const Expr *E) {
13312 return (isa<IntegerLiteral>(E) &&
13313 cast<IntegerLiteral>(E)->getValue() == 0) ||
13314 (isa<CharacterLiteral>(E) &&
13315 cast<CharacterLiteral>(E)->getValue() == 0);
13316 };
13317
13318 // If we're memsetting or bzeroing 0 bytes, then this is likely an error.
13319 SourceLocation CallLoc = Call->getRParenLoc();
13320 SourceManager &SM = S.getSourceManager();
13321 if (isLiteralZero(SizeArg) &&
13322 !isArgumentExpandedFromMacro(SM, CallLoc, ArgLoc: SizeArg->getExprLoc())) {
13323
13324 SourceLocation DiagLoc = SizeArg->getExprLoc();
13325
13326 // Some platforms #define bzero to __builtin_memset. See if this is the
13327 // case, and if so, emit a better diagnostic.
13328 if (BId == Builtin::BIbzero ||
13329 (CallLoc.isMacroID() && Lexer::getImmediateMacroName(
13330 CallLoc, SM, S.getLangOpts()) == "bzero")) {
13331 S.Diag(DiagLoc, diag::warn_suspicious_bzero_size);
13332 S.Diag(DiagLoc, diag::note_suspicious_bzero_size_silence);
13333 } else if (!isLiteralZero(Call->getArg(Arg: 1)->IgnoreImpCasts())) {
13334 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 0;
13335 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 0;
13336 }
13337 return;
13338 }
13339
13340 // If the second argument to a memset is a sizeof expression and the third
13341 // isn't, this is also likely an error. This should catch
13342 // 'memset(buf, sizeof(buf), 0xff)'.
13343 if (BId == Builtin::BImemset &&
13344 doesExprLikelyComputeSize(Call->getArg(1)) &&
13345 !doesExprLikelyComputeSize(Call->getArg(2))) {
13346 SourceLocation DiagLoc = Call->getArg(Arg: 1)->getExprLoc();
13347 S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1;
13348 S.Diag(DiagLoc, diag::note_suspicious_sizeof_memset_silence) << 1;
13349 return;
13350 }
13351}
13352
13353/// Check for dangerous or invalid arguments to memset().
13354///
13355/// This issues warnings on known problematic, dangerous or unspecified
13356/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
13357/// function calls.
13358///
13359/// \param Call The call expression to diagnose.
13360void Sema::CheckMemaccessArguments(const CallExpr *Call,
13361 unsigned BId,
13362 IdentifierInfo *FnName) {
13363 assert(BId != 0);
13364
13365  // It is possible to have a non-standard definition of memset. Validate that
13366  // we have enough arguments, and if not, abort further checking.
13367 unsigned ExpectedNumArgs =
13368 (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
13369 if (Call->getNumArgs() < ExpectedNumArgs)
13370 return;
13371
13372 unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
13373 BId == Builtin::BIstrndup ? 1 : 2);
13374 unsigned LenArg =
13375 (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
13376 const Expr *LenExpr = Call->getArg(Arg: LenArg)->IgnoreParenImpCasts();
13377
13378 if (CheckMemorySizeofForComparison(S&: *this, E: LenExpr, FnName,
13379 FnLoc: Call->getBeginLoc(), RParenLoc: Call->getRParenLoc()))
13380 return;
13381
13382 // Catch cases like 'memset(buf, sizeof(buf), 0)'.
13383 CheckMemaccessSize(S&: *this, BId, Call);
13384
13385 // We have special checking when the length is a sizeof expression.
13386 QualType SizeOfArgTy = getSizeOfArgType(E: LenExpr);
13387 const Expr *SizeOfArg = getSizeOfExprArg(E: LenExpr);
13388 llvm::FoldingSetNodeID SizeOfArgID;
13389
13390 // Although widely used, 'bzero' is not a standard function. Be more strict
13391 // with the argument types before allowing diagnostics and only allow the
13392 // form bzero(ptr, sizeof(...)).
13393 QualType FirstArgTy = Call->getArg(Arg: 0)->IgnoreParenImpCasts()->getType();
13394 if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
13395 return;
13396
13397 for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
13398 const Expr *Dest = Call->getArg(Arg: ArgIdx)->IgnoreParenImpCasts();
13399 SourceRange ArgRange = Call->getArg(Arg: ArgIdx)->getSourceRange();
13400
13401 QualType DestTy = Dest->getType();
13402 QualType PointeeTy;
13403 if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
13404 PointeeTy = DestPtrTy->getPointeeType();
13405
13406 // Never warn about void type pointers. This can be used to suppress
13407 // false positives.
13408 if (PointeeTy->isVoidType())
13409 continue;
13410
13411 // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
13412 // actually comparing the expressions for equality. Because computing the
13413 // expression IDs can be expensive, we only do this if the diagnostic is
13414 // enabled.
13415 if (SizeOfArg &&
13416 !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess,
13417 SizeOfArg->getExprLoc())) {
13418 // We only compute IDs for expressions if the warning is enabled, and
13419 // cache the sizeof arg's ID.
13420 if (SizeOfArgID == llvm::FoldingSetNodeID())
13421 SizeOfArg->Profile(SizeOfArgID, Context, true);
13422 llvm::FoldingSetNodeID DestID;
13423 Dest->Profile(DestID, Context, true);
13424 if (DestID == SizeOfArgID) {
13425 // TODO: For strncpy() and friends, this could suggest sizeof(dst)
13426 // over sizeof(src) as well.
13427 unsigned ActionIdx = 0; // Default is to suggest dereferencing.
13428 StringRef ReadableName = FnName->getName();
13429
13430 if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Val: Dest))
13431 if (UnaryOp->getOpcode() == UO_AddrOf)
13432              ActionIdx = 1; // If it's an address-of operator, just remove it.
13433 if (!PointeeTy->isIncompleteType() &&
13434 (Context.getTypeSize(T: PointeeTy) == Context.getCharWidth()))
13435 ActionIdx = 2; // If the pointee's size is sizeof(char),
13436 // suggest an explicit length.
13437
13438 // If the function is defined as a builtin macro, do not show macro
13439 // expansion.
13440 SourceLocation SL = SizeOfArg->getExprLoc();
13441 SourceRange DSR = Dest->getSourceRange();
13442 SourceRange SSR = SizeOfArg->getSourceRange();
13443 SourceManager &SM = getSourceManager();
13444
13445 if (SM.isMacroArgExpansion(Loc: SL)) {
13446 ReadableName = Lexer::getImmediateMacroName(Loc: SL, SM, LangOpts);
13447 SL = SM.getSpellingLoc(Loc: SL);
13448 DSR = SourceRange(SM.getSpellingLoc(Loc: DSR.getBegin()),
13449 SM.getSpellingLoc(Loc: DSR.getEnd()));
13450 SSR = SourceRange(SM.getSpellingLoc(Loc: SSR.getBegin()),
13451 SM.getSpellingLoc(Loc: SSR.getEnd()));
13452 }
13453
13454 DiagRuntimeBehavior(SL, SizeOfArg,
13455 PDiag(diag::warn_sizeof_pointer_expr_memaccess)
13456 << ReadableName
13457 << PointeeTy
13458 << DestTy
13459 << DSR
13460 << SSR);
13461 DiagRuntimeBehavior(SL, SizeOfArg,
13462 PDiag(diag::warn_sizeof_pointer_expr_memaccess_note)
13463 << ActionIdx
13464 << SSR);
13465
13466 break;
13467 }
13468 }
13469
13470 // Also check for cases where the sizeof argument is the exact same
13471 // type as the memory argument, and where it points to a user-defined
13472 // record type.
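      // A sketch of that situation (hypothetical code): given
      //
      //   struct S *a, *b;
      //   memset(a, 0, sizeof(b));
      //
      // the length is the size of a pointer that merely has the same type as
      // the destination, not the size of the 'struct S' being written to,
      // which is what the diagnostic below points out.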
13473 if (SizeOfArgTy != QualType()) {
13474 if (PointeeTy->isRecordType() &&
13475 Context.typesAreCompatible(T1: SizeOfArgTy, T2: DestTy)) {
13476 DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
13477 PDiag(diag::warn_sizeof_pointer_type_memaccess)
13478 << FnName << SizeOfArgTy << ArgIdx
13479 << PointeeTy << Dest->getSourceRange()
13480 << LenExpr->getSourceRange());
13481 break;
13482 }
13483 }
13484 } else if (DestTy->isArrayType()) {
13485 PointeeTy = DestTy;
13486 }
13487
13488 if (PointeeTy == QualType())
13489 continue;
13490
13491 // Always complain about dynamic classes.
13492 bool IsContained;
13493 if (const CXXRecordDecl *ContainedRD =
13494 getContainedDynamicClass(T: PointeeTy, IsContained)) {
13495
13496 unsigned OperationType = 0;
13497 const bool IsCmp = BId == Builtin::BImemcmp || BId == Builtin::BIbcmp;
13498 // "overwritten" if we're warning about the destination for any call
13499 // but memcmp; otherwise a verb appropriate to the call.
13500 if (ArgIdx != 0 || IsCmp) {
13501 if (BId == Builtin::BImemcpy)
13502 OperationType = 1;
13503 else if(BId == Builtin::BImemmove)
13504 OperationType = 2;
13505 else if (IsCmp)
13506 OperationType = 3;
13507 }
13508
13509 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
13510 PDiag(diag::warn_dyn_class_memaccess)
13511 << (IsCmp ? ArgIdx + 2 : ArgIdx) << FnName
13512 << IsContained << ContainedRD << OperationType
13513 << Call->getCallee()->getSourceRange());
13514 } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
13515 BId != Builtin::BImemset)
13516 DiagRuntimeBehavior(
13517 Dest->getExprLoc(), Dest,
13518 PDiag(diag::warn_arc_object_memaccess)
13519 << ArgIdx << FnName << PointeeTy
13520 << Call->getCallee()->getSourceRange());
13521 else if (const auto *RT = PointeeTy->getAs<RecordType>()) {
13522 if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) &&
13523 RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) {
13524 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
13525 PDiag(diag::warn_cstruct_memaccess)
13526 << ArgIdx << FnName << PointeeTy << 0);
13527 SearchNonTrivialToInitializeField::diag(RT: PointeeTy, E: Dest, S&: *this);
13528 } else if ((BId == Builtin::BImemcpy || BId == Builtin::BImemmove) &&
13529 RT->getDecl()->isNonTrivialToPrimitiveCopy()) {
13530 DiagRuntimeBehavior(Dest->getExprLoc(), Dest,
13531 PDiag(diag::warn_cstruct_memaccess)
13532 << ArgIdx << FnName << PointeeTy << 1);
13533 SearchNonTrivialToCopyField::diag(RT: PointeeTy, E: Dest, S&: *this);
13534 } else {
13535 continue;
13536 }
13537 } else
13538 continue;
13539
13540 DiagRuntimeBehavior(
13541 Dest->getExprLoc(), Dest,
13542 PDiag(diag::note_bad_memaccess_silence)
13543 << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
13544 break;
13545 }
13546}
13547
13548// A little helper routine: ignore addition and subtraction of integer literals.
13549// This intentionally does not ignore all integer constant expressions because
13550// we don't want to remove sizeof().
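// For example (illustrative only): 'n + 1', '(n) - 2', and '(n + 3)' all
// reduce to 'n', while 'n + sizeof(x)' and 'n * 2' are left untouched.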
13551static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
13552 Ex = Ex->IgnoreParenCasts();
13553
13554 while (true) {
13555 const BinaryOperator * BO = dyn_cast<BinaryOperator>(Val: Ex);
13556 if (!BO || !BO->isAdditiveOp())
13557 break;
13558
13559 const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
13560 const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
13561
13562 if (isa<IntegerLiteral>(Val: RHS))
13563 Ex = LHS;
13564 else if (isa<IntegerLiteral>(Val: LHS))
13565 Ex = RHS;
13566 else
13567 break;
13568 }
13569
13570 return Ex;
13571}
13572
13573static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty,
13574 ASTContext &Context) {
13575  // Only handle constant-size arrays or VLAs, but not flexible array members.
13576 if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T: Ty)) {
13577 // Only issue the FIXIT for arrays of size > 1.
13578 if (CAT->getSize().getSExtValue() <= 1)
13579 return false;
13580 } else if (!Ty->isVariableArrayType()) {
13581 return false;
13582 }
13583 return true;
13584}
13585
13586// Warn if the user has made the 'size' argument to strlcpy or strlcat
13587// be the size of the source, instead of the destination.
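// A sketch of the anti-pattern this looks for (hypothetical code):
//
//   char dst[64];
//   strlcpy(dst, src, sizeof(src));   // size of the source buffer
//   strlcpy(dst, src, strlen(src));   // likewise based on the source
//
// When the destination is a real array, the note's fix-it suggests
// 'sizeof(dst)' instead.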
13588void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
13589 IdentifierInfo *FnName) {
13590
13591 // Don't crash if the user has the wrong number of arguments
13592 unsigned NumArgs = Call->getNumArgs();
13593 if ((NumArgs != 3) && (NumArgs != 4))
13594 return;
13595
13596 const Expr *SrcArg = ignoreLiteralAdditions(Ex: Call->getArg(Arg: 1), Ctx&: Context);
13597 const Expr *SizeArg = ignoreLiteralAdditions(Ex: Call->getArg(Arg: 2), Ctx&: Context);
13598 const Expr *CompareWithSrc = nullptr;
13599
13600 if (CheckMemorySizeofForComparison(S&: *this, E: SizeArg, FnName,
13601 FnLoc: Call->getBeginLoc(), RParenLoc: Call->getRParenLoc()))
13602 return;
13603
13604 // Look for 'strlcpy(dst, x, sizeof(x))'
13605 if (const Expr *Ex = getSizeOfExprArg(E: SizeArg))
13606 CompareWithSrc = Ex;
13607 else {
13608 // Look for 'strlcpy(dst, x, strlen(x))'
13609 if (const CallExpr *SizeCall = dyn_cast<CallExpr>(Val: SizeArg)) {
13610 if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen &&
13611 SizeCall->getNumArgs() == 1)
13612 CompareWithSrc = ignoreLiteralAdditions(Ex: SizeCall->getArg(Arg: 0), Ctx&: Context);
13613 }
13614 }
13615
13616 if (!CompareWithSrc)
13617 return;
13618
13619 // Determine if the argument to sizeof/strlen is equal to the source
13620 // argument. In principle there's all kinds of things you could do
13621 // here, for instance creating an == expression and evaluating it with
13622 // EvaluateAsBooleanCondition, but this uses a more direct technique:
13623 const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(Val: SrcArg);
13624 if (!SrcArgDRE)
13625 return;
13626
13627 const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(Val: CompareWithSrc);
13628 if (!CompareWithSrcDRE ||
13629 SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
13630 return;
13631
13632 const Expr *OriginalSizeArg = Call->getArg(Arg: 2);
13633 Diag(CompareWithSrcDRE->getBeginLoc(), diag::warn_strlcpycat_wrong_size)
13634 << OriginalSizeArg->getSourceRange() << FnName;
13635
13636 // Output a FIXIT hint if the destination is an array (rather than a
13637 // pointer to an array). This could be enhanced to handle some
13638 // pointers if we know the actual size, like if DstArg is 'array+2'
13639 // we could say 'sizeof(array)-2'.
13640 const Expr *DstArg = Call->getArg(Arg: 0)->IgnoreParenImpCasts();
13641 if (!isConstantSizeArrayWithMoreThanOneElement(Ty: DstArg->getType(), Context))
13642 return;
13643
13644 SmallString<128> sizeString;
13645 llvm::raw_svector_ostream OS(sizeString);
13646 OS << "sizeof(";
13647 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
13648 OS << ")";
13649
13650 Diag(OriginalSizeArg->getBeginLoc(), diag::note_strlcpycat_wrong_size)
13651 << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
13652 OS.str());
13653}
13654
13655/// Check if two expressions refer to the same declaration.
13656static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
13657 if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(Val: E1))
13658 if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(Val: E2))
13659 return D1->getDecl() == D2->getDecl();
13660 return false;
13661}
13662
13663static const Expr *getStrlenExprArg(const Expr *E) {
13664 if (const CallExpr *CE = dyn_cast<CallExpr>(Val: E)) {
13665 const FunctionDecl *FD = CE->getDirectCallee();
13666 if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
13667 return nullptr;
13668 return CE->getArg(Arg: 0)->IgnoreParenCasts();
13669 }
13670 return nullptr;
13671}
13672
13673// Warn on anti-patterns as the 'size' argument to strncat.
13674// The correct size argument should look like the following:
13675//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
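// Illustrative anti-patterns (hypothetical code) recognized below:
//
//   strncat(dst, src, sizeof(dst));                 // can overflow by one
//   strncat(dst, src, sizeof(src));                 // sized to the wrong buffer
//   strncat(dst, src, sizeof(dst) - strlen(dst));   // still off by one
//
// When 'dst' is a known-size array, the note's fix-it rewrites the length
// argument to 'sizeof(dst) - strlen(dst) - 1'.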
13676void Sema::CheckStrncatArguments(const CallExpr *CE,
13677 IdentifierInfo *FnName) {
13678 // Don't crash if the user has the wrong number of arguments.
13679 if (CE->getNumArgs() < 3)
13680 return;
13681 const Expr *DstArg = CE->getArg(Arg: 0)->IgnoreParenCasts();
13682 const Expr *SrcArg = CE->getArg(Arg: 1)->IgnoreParenCasts();
13683 const Expr *LenArg = CE->getArg(Arg: 2)->IgnoreParenCasts();
13684
13685 if (CheckMemorySizeofForComparison(S&: *this, E: LenArg, FnName, FnLoc: CE->getBeginLoc(),
13686 RParenLoc: CE->getRParenLoc()))
13687 return;
13688
13689 // Identify common expressions, which are wrongly used as the size argument
13690 // to strncat and may lead to buffer overflows.
13691 unsigned PatternType = 0;
13692 if (const Expr *SizeOfArg = getSizeOfExprArg(E: LenArg)) {
13693 // - sizeof(dst)
13694 if (referToTheSameDecl(E1: SizeOfArg, E2: DstArg))
13695 PatternType = 1;
13696 // - sizeof(src)
13697 else if (referToTheSameDecl(E1: SizeOfArg, E2: SrcArg))
13698 PatternType = 2;
13699 } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Val: LenArg)) {
13700 if (BE->getOpcode() == BO_Sub) {
13701 const Expr *L = BE->getLHS()->IgnoreParenCasts();
13702 const Expr *R = BE->getRHS()->IgnoreParenCasts();
13703 // - sizeof(dst) - strlen(dst)
13704 if (referToTheSameDecl(E1: DstArg, E2: getSizeOfExprArg(E: L)) &&
13705 referToTheSameDecl(E1: DstArg, E2: getStrlenExprArg(E: R)))
13706 PatternType = 1;
13707 // - sizeof(src) - (anything)
13708 else if (referToTheSameDecl(E1: SrcArg, E2: getSizeOfExprArg(E: L)))
13709 PatternType = 2;
13710 }
13711 }
13712
13713 if (PatternType == 0)
13714 return;
13715
13716 // Generate the diagnostic.
13717 SourceLocation SL = LenArg->getBeginLoc();
13718 SourceRange SR = LenArg->getSourceRange();
13719 SourceManager &SM = getSourceManager();
13720
13721 // If the function is defined as a builtin macro, do not show macro expansion.
13722 if (SM.isMacroArgExpansion(Loc: SL)) {
13723 SL = SM.getSpellingLoc(Loc: SL);
13724 SR = SourceRange(SM.getSpellingLoc(Loc: SR.getBegin()),
13725 SM.getSpellingLoc(Loc: SR.getEnd()));
13726 }
13727
13728 // Check if the destination is an array (rather than a pointer to an array).
13729 QualType DstTy = DstArg->getType();
13730 bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(Ty: DstTy,
13731 Context);
13732 if (!isKnownSizeArray) {
13733 if (PatternType == 1)
13734 Diag(SL, diag::warn_strncat_wrong_size) << SR;
13735 else
13736 Diag(SL, diag::warn_strncat_src_size) << SR;
13737 return;
13738 }
13739
13740 if (PatternType == 1)
13741 Diag(SL, diag::warn_strncat_large_size) << SR;
13742 else
13743 Diag(SL, diag::warn_strncat_src_size) << SR;
13744
13745 SmallString<128> sizeString;
13746 llvm::raw_svector_ostream OS(sizeString);
13747 OS << "sizeof(";
13748 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
13749 OS << ") - ";
13750 OS << "strlen(";
13751 DstArg->printPretty(OS, nullptr, getPrintingPolicy());
13752 OS << ") - 1";
13753
13754 Diag(SL, diag::note_strncat_wrong_size)
13755 << FixItHint::CreateReplacement(SR, OS.str());
13756}
13757
13758namespace {
13759void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
13760 const UnaryOperator *UnaryExpr, const Decl *D) {
13761 if (isa<FieldDecl, FunctionDecl, VarDecl>(Val: D)) {
13762 S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
13763 << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
13764 return;
13765 }
13766}
13767
13768void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
13769 const UnaryOperator *UnaryExpr) {
13770 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Val: UnaryExpr->getSubExpr())) {
13771 const Decl *D = Lvalue->getDecl();
13772 if (isa<DeclaratorDecl>(Val: D))
13773 if (!dyn_cast<DeclaratorDecl>(Val: D)->getType()->isReferenceType())
13774 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
13775 }
13776
13777 if (const auto *Lvalue = dyn_cast<MemberExpr>(Val: UnaryExpr->getSubExpr()))
13778 return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
13779 Lvalue->getMemberDecl());
13780}
13781
13782void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
13783 const UnaryOperator *UnaryExpr) {
13784 const auto *Lambda = dyn_cast<LambdaExpr>(
13785 Val: UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
13786 if (!Lambda)
13787 return;
13788
13789 S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
13790 << CalleeName << 2 /*object: lambda expression*/;
13791}
13792
13793void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
13794 const DeclRefExpr *Lvalue) {
13795 const auto *Var = dyn_cast<VarDecl>(Val: Lvalue->getDecl());
13796 if (Var == nullptr)
13797 return;
13798
13799 S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
13800 << CalleeName << 0 /*object: */ << Var;
13801}
13802
13803void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
13804 const CastExpr *Cast) {
13805 SmallString<128> SizeString;
13806 llvm::raw_svector_ostream OS(SizeString);
13807
13808 clang::CastKind Kind = Cast->getCastKind();
13809 if (Kind == clang::CK_BitCast &&
13810 !Cast->getSubExpr()->getType()->isFunctionPointerType())
13811 return;
13812 if (Kind == clang::CK_IntegralToPointer &&
13813 !isa<IntegerLiteral>(
13814 Val: Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
13815 return;
13816
13817 switch (Cast->getCastKind()) {
13818 case clang::CK_BitCast:
13819 case clang::CK_IntegralToPointer:
13820 case clang::CK_FunctionToPointerDecay:
13821 OS << '\'';
13822 Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
13823 OS << '\'';
13824 break;
13825 default:
13826 return;
13827 }
13828
13829 S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
13830 << CalleeName << 0 /*object: */ << OS.str();
13831}
13832} // namespace
13833
13834/// Alerts the user that they are attempting to free a non-malloc'd object.
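///
/// A few illustrative calls (hypothetical code) that are flagged:
///
///   int x;       free(&x);     // address of a local object
///   char buf[8]; free(buf);    // array variable, not heap memory
///   free(+[] {});              // lambda converted to a function pointer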
13835void Sema::CheckFreeArguments(const CallExpr *E) {
13836 const std::string CalleeName =
13837 cast<FunctionDecl>(Val: E->getCalleeDecl())->getQualifiedNameAsString();
13838
13839 { // Prefer something that doesn't involve a cast to make things simpler.
13840 const Expr *Arg = E->getArg(Arg: 0)->IgnoreParenCasts();
13841 if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Val: Arg))
13842 switch (UnaryExpr->getOpcode()) {
13843 case UnaryOperator::Opcode::UO_AddrOf:
13844 return CheckFreeArgumentsAddressof(S&: *this, CalleeName, UnaryExpr);
13845 case UnaryOperator::Opcode::UO_Plus:
13846 return CheckFreeArgumentsPlus(S&: *this, CalleeName, UnaryExpr);
13847 default:
13848 break;
13849 }
13850
13851 if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Val: Arg))
13852 if (Lvalue->getType()->isArrayType())
13853 return CheckFreeArgumentsStackArray(S&: *this, CalleeName, Lvalue);
13854
13855 if (const auto *Label = dyn_cast<AddrLabelExpr>(Val: Arg)) {
13856 Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
13857 << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
13858 return;
13859 }
13860
13861 if (isa<BlockExpr>(Val: Arg)) {
13862 Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
13863 << CalleeName << 1 /*object: block*/;
13864 return;
13865 }
13866 }
13867  // Maybe the cast was important; check it after the other cases.
13868 if (const auto *Cast = dyn_cast<CastExpr>(Val: E->getArg(Arg: 0)))
13869 return CheckFreeArgumentsCast(S&: *this, CalleeName, Cast);
13870}
13871
13872void
13873Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
13874 SourceLocation ReturnLoc,
13875 bool isObjCMethod,
13876 const AttrVec *Attrs,
13877 const FunctionDecl *FD) {
13878 // Check if the return value is null but should not be.
13879 if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
13880 (!isObjCMethod && isNonNullType(lhsType))) &&
13881 CheckNonNullExpr(*this, RetValExp))
13882 Diag(ReturnLoc, diag::warn_null_ret)
13883 << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();
13884
13885 // C++11 [basic.stc.dynamic.allocation]p4:
13886 // If an allocation function declared with a non-throwing
13887 // exception-specification fails to allocate storage, it shall return
13888 // a null pointer. Any other allocation function that fails to allocate
13889 // storage shall indicate failure only by throwing an exception [...]
13890 if (FD) {
13891 OverloadedOperatorKind Op = FD->getOverloadedOperator();
13892 if (Op == OO_New || Op == OO_Array_New) {
13893 const FunctionProtoType *Proto
13894 = FD->getType()->castAs<FunctionProtoType>();
13895 if (!Proto->isNothrow(/*ResultIfDependent*/true) &&
13896 CheckNonNullExpr(*this, RetValExp))
13897 Diag(ReturnLoc, diag::warn_operator_new_returns_null)
13898 << FD << getLangOpts().CPlusPlus11;
13899 }
13900 }
13901
13902 if (RetValExp && RetValExp->getType()->isWebAssemblyTableType()) {
13903 Diag(ReturnLoc, diag::err_wasm_table_art) << 1;
13904 }
13905
13906  // PPC MMA non-pointer types are not allowed as return types. Checking the type
13907  // here prevents the user from using a PPC MMA type as a trailing return type.
13908 if (Context.getTargetInfo().getTriple().isPPC64())
13909 CheckPPCMMAType(Type: RetValExp->getType(), TypeLoc: ReturnLoc);
13910}
13911
13912/// Check for comparisons of floating-point values using == and !=. Issue a
13913/// warning if the comparison is not likely to do what the programmer intended.
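///
/// For instance (illustrative only), given 'float F', the comparison
/// 'F == 0.3' promotes F to double and compares against a literal that is not
/// exactly representable as a float, so it is reported as always false; the
/// more general -Wfloat-equal check at the end covers comparisons such as
/// 'F == G' between two floating-point variables.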
13914void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
13915 BinaryOperatorKind Opcode) {
13916 if (!BinaryOperator::isEqualityOp(Opc: Opcode))
13917 return;
13918
13919 // Match and capture subexpressions such as "(float) X == 0.1".
13920 FloatingLiteral *FPLiteral;
13921 CastExpr *FPCast;
13922 auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
13923 FPLiteral = dyn_cast<FloatingLiteral>(Val: L->IgnoreParens());
13924 FPCast = dyn_cast<CastExpr>(Val: R->IgnoreParens());
13925 return FPLiteral && FPCast;
13926 };
13927
13928 if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
13929 auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
13930 auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
13931 if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
13932 TargetTy->isFloatingPoint()) {
13933 bool Lossy;
13934 llvm::APFloat TargetC = FPLiteral->getValue();
13935 TargetC.convert(ToSemantics: Context.getFloatTypeSemantics(T: QualType(SourceTy, 0)),
13936 RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &Lossy);
13937 if (Lossy) {
13938        // If the literal cannot be represented in the source type, then a
13939        // check for == is always false and a check for != is always true.
13940 Diag(Loc, diag::warn_float_compare_literal)
13941 << (Opcode == BO_EQ) << QualType(SourceTy, 0)
13942 << LHS->getSourceRange() << RHS->getSourceRange();
13943 return;
13944 }
13945 }
13946 }
13947
13948 // Match a more general floating-point equality comparison (-Wfloat-equal).
13949 Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
13950 Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
13951
13952 // Special case: check for x == x (which is OK).
13953 // Do not emit warnings for such cases.
13954 if (auto *DRL = dyn_cast<DeclRefExpr>(Val: LeftExprSansParen))
13955 if (auto *DRR = dyn_cast<DeclRefExpr>(Val: RightExprSansParen))
13956 if (DRL->getDecl() == DRR->getDecl())
13957 return;
13958
13959 // Special case: check for comparisons against literals that can be exactly
13960 // represented by APFloat. In such cases, do not emit a warning. This
13961  // is a heuristic: often comparisons against such literals are used to
13962 // detect if a value in a variable has not changed. This clearly can
13963 // lead to false negatives.
13964 if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(Val: LeftExprSansParen)) {
13965 if (FLL->isExact())
13966 return;
13967 } else
13968 if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(Val: RightExprSansParen))
13969 if (FLR->isExact())
13970 return;
13971
13972  // Check for comparisons against calls to builtin functions.
13973 if (CallExpr* CL = dyn_cast<CallExpr>(Val: LeftExprSansParen))
13974 if (CL->getBuiltinCallee())
13975 return;
13976
13977 if (CallExpr* CR = dyn_cast<CallExpr>(Val: RightExprSansParen))
13978 if (CR->getBuiltinCallee())
13979 return;
13980
13981 // Emit the diagnostic.
13982 Diag(Loc, diag::warn_floatingpoint_eq)
13983 << LHS->getSourceRange() << RHS->getSourceRange();
13984}
13985
13986//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
13987//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
13988
13989namespace {
13990
13991/// Structure recording the 'active' range of an integer-valued
13992/// expression.
13993struct IntRange {
13994 /// The number of bits active in the int. Note that this includes exactly one
13995 /// sign bit if !NonNegative.
13996 unsigned Width;
13997
13998 /// True if the int is known not to have negative values. If so, all leading
13999 /// bits before Width are known zero, otherwise they are known to be the
14000 /// same as the MSB within Width.
14001 bool NonNegative;
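
  // Some illustrative values (assuming a typical 32-bit 'int'):
  //   bool          -> (1,  true)
  //   unsigned char -> (8,  true)
  //   signed char   -> (8,  false)
  //   unsigned int  -> (32, true)
  //   int           -> (32, false)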
14002
14003 IntRange(unsigned Width, bool NonNegative)
14004 : Width(Width), NonNegative(NonNegative) {}
14005
14006 /// Number of bits excluding the sign bit.
14007 unsigned valueBits() const {
14008 return NonNegative ? Width : Width - 1;
14009 }
14010
14011 /// Returns the range of the bool type.
14012 static IntRange forBoolType() {
14013 return IntRange(1, true);
14014 }
14015
14016 /// Returns the range of an opaque value of the given integral type.
14017 static IntRange forValueOfType(ASTContext &C, QualType T) {
14018 return forValueOfCanonicalType(C,
14019 T: T->getCanonicalTypeInternal().getTypePtr());
14020 }
14021
14022 /// Returns the range of an opaque value of a canonical integral type.
14023 static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
14024 assert(T->isCanonicalUnqualified());
14025
14026 if (const VectorType *VT = dyn_cast<VectorType>(Val: T))
14027 T = VT->getElementType().getTypePtr();
14028 if (const ComplexType *CT = dyn_cast<ComplexType>(Val: T))
14029 T = CT->getElementType().getTypePtr();
14030 if (const AtomicType *AT = dyn_cast<AtomicType>(Val: T))
14031 T = AT->getValueType().getTypePtr();
14032
14033 if (!C.getLangOpts().CPlusPlus) {
14034 // For enum types in C code, use the underlying datatype.
14035 if (const EnumType *ET = dyn_cast<EnumType>(Val: T))
14036 T = ET->getDecl()->getIntegerType().getDesugaredType(Context: C).getTypePtr();
14037 } else if (const EnumType *ET = dyn_cast<EnumType>(Val: T)) {
14038 // For enum types in C++, use the known bit width of the enumerators.
14039 EnumDecl *Enum = ET->getDecl();
14040 // In C++11, enums can have a fixed underlying type. Use this type to
14041 // compute the range.
14042 if (Enum->isFixed()) {
14043 return IntRange(C.getIntWidth(T: QualType(T, 0)),
14044 !ET->isSignedIntegerOrEnumerationType());
14045 }
14046
14047 unsigned NumPositive = Enum->getNumPositiveBits();
14048 unsigned NumNegative = Enum->getNumNegativeBits();
14049
14050 if (NumNegative == 0)
14051 return IntRange(NumPositive, true/*NonNegative*/);
14052 else
14053 return IntRange(std::max(a: NumPositive + 1, b: NumNegative),
14054 false/*NonNegative*/);
14055 }
14056
14057 if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
14058 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
14059
14060 const BuiltinType *BT = cast<BuiltinType>(Val: T);
14061 assert(BT->isInteger());
14062
14063 return IntRange(C.getIntWidth(T: QualType(T, 0)), BT->isUnsignedInteger());
14064 }
14065
14066 /// Returns the "target" range of a canonical integral type, i.e.
14067 /// the range of values expressible in the type.
14068 ///
14069 /// This matches forValueOfCanonicalType except that enums have the
14070 /// full range of their type, not the range of their enumerators.
14071 static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
14072 assert(T->isCanonicalUnqualified());
14073
14074 if (const VectorType *VT = dyn_cast<VectorType>(Val: T))
14075 T = VT->getElementType().getTypePtr();
14076 if (const ComplexType *CT = dyn_cast<ComplexType>(Val: T))
14077 T = CT->getElementType().getTypePtr();
14078 if (const AtomicType *AT = dyn_cast<AtomicType>(Val: T))
14079 T = AT->getValueType().getTypePtr();
14080 if (const EnumType *ET = dyn_cast<EnumType>(Val: T))
14081 T = C.getCanonicalType(T: ET->getDecl()->getIntegerType()).getTypePtr();
14082
14083 if (const auto *EIT = dyn_cast<BitIntType>(Val: T))
14084 return IntRange(EIT->getNumBits(), EIT->isUnsigned());
14085
14086 const BuiltinType *BT = cast<BuiltinType>(Val: T);
14087 assert(BT->isInteger());
14088
14089 return IntRange(C.getIntWidth(T: QualType(T, 0)), BT->isUnsignedInteger());
14090 }
14091
14092 /// Returns the supremum of two ranges: i.e. their conservative merge.
14093 static IntRange join(IntRange L, IntRange R) {
14094 bool Unsigned = L.NonNegative && R.NonNegative;
14095 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + !Unsigned,
14096 L.NonNegative && R.NonNegative);
14097 }
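
  // A worked example of join() (for illustration): joining an unsigned 8-bit
  // range (8, true) with a signed 16-bit range (16, false) keeps
  // max(8, 15) = 15 value bits and re-adds a sign bit, yielding (16, false).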
14098
14099 /// Return the range of a bitwise-AND of the two ranges.
14100 static IntRange bit_and(IntRange L, IntRange R) {
14101 unsigned Bits = std::max(a: L.Width, b: R.Width);
14102 bool NonNegative = false;
14103 if (L.NonNegative) {
14104 Bits = std::min(a: Bits, b: L.Width);
14105 NonNegative = true;
14106 }
14107 if (R.NonNegative) {
14108 Bits = std::min(a: Bits, b: R.Width);
14109 NonNegative = true;
14110 }
14111 return IntRange(Bits, NonNegative);
14112 }
14113
14114 /// Return the range of a sum of the two ranges.
14115 static IntRange sum(IntRange L, IntRange R) {
14116 bool Unsigned = L.NonNegative && R.NonNegative;
14117 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + 1 + !Unsigned,
14118 Unsigned);
14119 }
14120
14121 /// Return the range of a difference of the two ranges.
14122 static IntRange difference(IntRange L, IntRange R) {
14123 // We need a 1-bit-wider range if:
14124 // 1) LHS can be negative: least value can be reduced.
14125 // 2) RHS can be negative: greatest value can be increased.
14126 bool CanWiden = !L.NonNegative || !R.NonNegative;
14127 bool Unsigned = L.NonNegative && R.Width == 0;
14128 return IntRange(std::max(a: L.valueBits(), b: R.valueBits()) + CanWiden +
14129 !Unsigned,
14130 Unsigned);
14131 }
14132
14133 /// Return the range of a product of the two ranges.
14134 static IntRange product(IntRange L, IntRange R) {
14135 // If both LHS and RHS can be negative, we can form
14136 // -2^L * -2^R = 2^(L + R)
14137 // which requires L + R + 1 value bits to represent.
14138 bool CanWiden = !L.NonNegative && !R.NonNegative;
14139 bool Unsigned = L.NonNegative && R.NonNegative;
14140 return IntRange(L.valueBits() + R.valueBits() + CanWiden + !Unsigned,
14141 Unsigned);
14142 }
14143
14144 /// Return the range of a remainder operation between the two ranges.
14145 static IntRange rem(IntRange L, IntRange R) {
14146 // The result of a remainder can't be larger than the result of
14147 // either side. The sign of the result is the sign of the LHS.
14148 bool Unsigned = L.NonNegative;
14149 return IntRange(std::min(a: L.valueBits(), b: R.valueBits()) + !Unsigned,
14150 Unsigned);
14151 }
14152};
14153
14154} // namespace
14155
14156static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
14157 unsigned MaxWidth) {
14158 if (value.isSigned() && value.isNegative())
14159 return IntRange(value.getSignificantBits(), false);
14160
14161 if (value.getBitWidth() > MaxWidth)
14162 value = value.trunc(width: MaxWidth);
14163
14164 // isNonNegative() just checks the sign bit without considering
14165 // signedness.
14166 return IntRange(value.getActiveBits(), true);
14167}
14168
14169static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
14170 unsigned MaxWidth) {
14171 if (result.isInt())
14172 return GetValueRange(C, value&: result.getInt(), MaxWidth);
14173
14174 if (result.isVector()) {
14175 IntRange R = GetValueRange(C, result&: result.getVectorElt(I: 0), Ty, MaxWidth);
14176 for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
14177 IntRange El = GetValueRange(C, result&: result.getVectorElt(I: i), Ty, MaxWidth);
14178 R = IntRange::join(L: R, R: El);
14179 }
14180 return R;
14181 }
14182
14183 if (result.isComplexInt()) {
14184 IntRange R = GetValueRange(C, value&: result.getComplexIntReal(), MaxWidth);
14185 IntRange I = GetValueRange(C, value&: result.getComplexIntImag(), MaxWidth);
14186 return IntRange::join(L: R, R: I);
14187 }
14188
14189 // This can happen with lossless casts to intptr_t of "based" lvalues.
14190 // Assume it might use arbitrary bits.
14191 // FIXME: The only reason we need to pass the type in here is to get
14192 // the sign right on this one case. It would be nice if APValue
14193 // preserved this.
14194 assert(result.isLValue() || result.isAddrLabelDiff());
14195 return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
14196}
14197
14198static QualType GetExprType(const Expr *E) {
14199 QualType Ty = E->getType();
14200 if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
14201 Ty = AtomicRHS->getValueType();
14202 return Ty;
14203}
14204
14205/// Pseudo-evaluate the given integer expression, estimating the
14206/// range of values it might take.
14207///
14208/// \param MaxWidth The width to which the value will be truncated.
14209/// \param Approximate If \c true, return a likely range for the result: in
14210/// particular, assume that arithmetic on narrower types doesn't leave
14211/// those types. If \c false, return a range including all possible
14212/// result values.
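///
/// As a rough illustration (hypothetical code): for 'unsigned char a, b', the
/// sum 'a + b' is computed in 'int' after promotion. With \p Approximate set,
/// the result is still treated as an 8-bit non-negative range; without it,
/// the possible carry is accounted for and the range widens to 9 bits.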
14213static IntRange GetExprRange(ASTContext &C, const Expr *E, unsigned MaxWidth,
14214 bool InConstantContext, bool Approximate) {
14215 E = E->IgnoreParens();
14216
14217 // Try a full evaluation first.
14218 Expr::EvalResult result;
14219 if (E->EvaluateAsRValue(Result&: result, Ctx: C, InConstantContext))
14220 return GetValueRange(C, result&: result.Val, Ty: GetExprType(E), MaxWidth);
14221
14222 // I think we only want to look through implicit casts here; if the
14223 // user has an explicit widening cast, we should treat the value as
14224 // being of the new, wider type.
14225 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Val: E)) {
14226 if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
14227 return GetExprRange(C, CE->getSubExpr(), MaxWidth, InConstantContext,
14228 Approximate);
14229
14230 IntRange OutputTypeRange = IntRange::forValueOfType(C, T: GetExprType(CE));
14231
14232 bool isIntegerCast = CE->getCastKind() == CK_IntegralCast ||
14233 CE->getCastKind() == CK_BooleanToSignedIntegral;
14234
14235 // Assume that non-integer casts can span the full range of the type.
14236 if (!isIntegerCast)
14237 return OutputTypeRange;
14238
14239 IntRange SubRange = GetExprRange(C, CE->getSubExpr(),
14240 std::min(a: MaxWidth, b: OutputTypeRange.Width),
14241 InConstantContext, Approximate);
14242
14243 // Bail out if the subexpr's range is as wide as the cast type.
14244 if (SubRange.Width >= OutputTypeRange.Width)
14245 return OutputTypeRange;
14246
14247 // Otherwise, we take the smaller width, and we're non-negative if
14248 // either the output type or the subexpr is.
14249 return IntRange(SubRange.Width,
14250 SubRange.NonNegative || OutputTypeRange.NonNegative);
14251 }
14252
14253 if (const auto *CO = dyn_cast<ConditionalOperator>(Val: E)) {
14254 // If we can fold the condition, just take that operand.
14255 bool CondResult;
14256 if (CO->getCond()->EvaluateAsBooleanCondition(Result&: CondResult, Ctx: C))
14257 return GetExprRange(C,
14258 E: CondResult ? CO->getTrueExpr() : CO->getFalseExpr(),
14259 MaxWidth, InConstantContext, Approximate);
14260
14261 // Otherwise, conservatively merge.
14262 // GetExprRange requires an integer expression, but a throw expression
14263 // results in a void type.
14264 Expr *E = CO->getTrueExpr();
14265 IntRange L = E->getType()->isVoidType()
14266 ? IntRange{0, true}
14267 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14268 E = CO->getFalseExpr();
14269 IntRange R = E->getType()->isVoidType()
14270 ? IntRange{0, true}
14271 : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate);
14272 return IntRange::join(L, R);
14273 }
14274
14275 if (const auto *BO = dyn_cast<BinaryOperator>(Val: E)) {
14276 IntRange (*Combine)(IntRange, IntRange) = IntRange::join;
14277
14278 switch (BO->getOpcode()) {
14279 case BO_Cmp:
14280 llvm_unreachable("builtin <=> should have class type");
14281
14282 // Boolean-valued operations are single-bit and positive.
14283 case BO_LAnd:
14284 case BO_LOr:
14285 case BO_LT:
14286 case BO_GT:
14287 case BO_LE:
14288 case BO_GE:
14289 case BO_EQ:
14290 case BO_NE:
14291 return IntRange::forBoolType();
14292
14293 // The type of the assignments is the type of the LHS, so the RHS
14294 // is not necessarily the same type.
14295 case BO_MulAssign:
14296 case BO_DivAssign:
14297 case BO_RemAssign:
14298 case BO_AddAssign:
14299 case BO_SubAssign:
14300 case BO_XorAssign:
14301 case BO_OrAssign:
14302 // TODO: bitfields?
14303 return IntRange::forValueOfType(C, T: GetExprType(E));
14304
14305 // Simple assignments just pass through the RHS, which will have
14306 // been coerced to the LHS type.
14307 case BO_Assign:
14308 // TODO: bitfields?
14309 return GetExprRange(C, E: BO->getRHS(), MaxWidth, InConstantContext,
14310 Approximate);
14311
14312 // Operations with opaque sources are black-listed.
14313 case BO_PtrMemD:
14314 case BO_PtrMemI:
14315 return IntRange::forValueOfType(C, T: GetExprType(E));
14316
14317    // Bitwise-and uses the *infimum* of the two source ranges.
14318 case BO_And:
14319 case BO_AndAssign:
14320 Combine = IntRange::bit_and;
14321 break;
14322
14323 // Left shift gets black-listed based on a judgement call.
14324 case BO_Shl:
14325 // ...except that we want to treat '1 << (blah)' as logically
14326 // positive. It's an important idiom.
14327 if (IntegerLiteral *I
14328 = dyn_cast<IntegerLiteral>(Val: BO->getLHS()->IgnoreParenCasts())) {
14329 if (I->getValue() == 1) {
14330 IntRange R = IntRange::forValueOfType(C, T: GetExprType(E));
14331 return IntRange(R.Width, /*NonNegative*/ true);
14332 }
14333 }
14334 [[fallthrough]];
14335
14336 case BO_ShlAssign:
14337 return IntRange::forValueOfType(C, T: GetExprType(E));
14338
14339 // Right shift by a constant can narrow its left argument.
14340 case BO_Shr:
14341 case BO_ShrAssign: {
14342 IntRange L = GetExprRange(C, E: BO->getLHS(), MaxWidth, InConstantContext,
14343 Approximate);
14344
14345 // If the shift amount is a positive constant, drop the width by
14346 // that much.
14347 if (std::optional<llvm::APSInt> shift =
14348 BO->getRHS()->getIntegerConstantExpr(Ctx: C)) {
14349 if (shift->isNonNegative()) {
14350 if (shift->uge(RHS: L.Width))
14351 L.Width = (L.NonNegative ? 0 : 1);
14352 else
14353 L.Width -= shift->getZExtValue();
14354 }
14355 }
14356
14357 return L;
14358 }
14359
14360 // Comma acts as its right operand.
14361 case BO_Comma:
14362 return GetExprRange(C, E: BO->getRHS(), MaxWidth, InConstantContext,
14363 Approximate);
14364
14365 case BO_Add:
14366 if (!Approximate)
14367 Combine = IntRange::sum;
14368 break;
14369
14370 case BO_Sub:
14371 if (BO->getLHS()->getType()->isPointerType())
14372 return IntRange::forValueOfType(C, T: GetExprType(E));
14373 if (!Approximate)
14374 Combine = IntRange::difference;
14375 break;
14376
14377 case BO_Mul:
14378 if (!Approximate)
14379 Combine = IntRange::product;
14380 break;
14381
14382 // The width of a division result is mostly determined by the size
14383 // of the LHS.
14384 case BO_Div: {
14385 // Don't 'pre-truncate' the operands.
14386 unsigned opWidth = C.getIntWidth(T: GetExprType(E));
14387 IntRange L = GetExprRange(C, E: BO->getLHS(), MaxWidth: opWidth, InConstantContext,
14388 Approximate);
14389
14390 // If the divisor is constant, use that.
14391 if (std::optional<llvm::APSInt> divisor =
14392 BO->getRHS()->getIntegerConstantExpr(Ctx: C)) {
14393 unsigned log2 = divisor->logBase2(); // floor(log_2(divisor))
14394 if (log2 >= L.Width)
14395 L.Width = (L.NonNegative ? 0 : 1);
14396 else
14397 L.Width = std::min(a: L.Width - log2, b: MaxWidth);
14398 return L;
14399 }
14400
14401 // Otherwise, just use the LHS's width.
14402 // FIXME: This is wrong if the LHS could be its minimal value and the RHS
14403 // could be -1.
14404 IntRange R = GetExprRange(C, E: BO->getRHS(), MaxWidth: opWidth, InConstantContext,
14405 Approximate);
14406 return IntRange(L.Width, L.NonNegative && R.NonNegative);
14407 }
14408
14409 case BO_Rem:
14410 Combine = IntRange::rem;
14411 break;
14412
14413 // The default behavior is okay for these.
14414 case BO_Xor:
14415 case BO_Or:
14416 break;
14417 }
14418
14419 // Combine the two ranges, but limit the result to the type in which we
14420 // performed the computation.
14421 QualType T = GetExprType(E);
14422 unsigned opWidth = C.getIntWidth(T);
14423 IntRange L =
14424 GetExprRange(C, E: BO->getLHS(), MaxWidth: opWidth, InConstantContext, Approximate);
14425 IntRange R =
14426 GetExprRange(C, E: BO->getRHS(), MaxWidth: opWidth, InConstantContext, Approximate);
14427 IntRange C = Combine(L, R);
14428 C.NonNegative |= T->isUnsignedIntegerOrEnumerationType();
14429 C.Width = std::min(a: C.Width, b: MaxWidth);
14430 return C;
14431 }
14432
14433 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E)) {
14434 switch (UO->getOpcode()) {
14435 // Boolean-valued operations are white-listed.
14436 case UO_LNot:
14437 return IntRange::forBoolType();
14438
14439 // Operations with opaque sources are black-listed.
14440 case UO_Deref:
14441 case UO_AddrOf: // should be impossible
14442 return IntRange::forValueOfType(C, T: GetExprType(E));
14443
14444 default:
14445 return GetExprRange(C, E: UO->getSubExpr(), MaxWidth, InConstantContext,
14446 Approximate);
14447 }
14448 }
14449
14450 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Val: E))
14451 return GetExprRange(C, E: OVE->getSourceExpr(), MaxWidth, InConstantContext,
14452 Approximate);
14453
14454 if (const auto *BitField = E->getSourceBitField())
14455 return IntRange(BitField->getBitWidthValue(Ctx: C),
14456 BitField->getType()->isUnsignedIntegerOrEnumerationType());
14457
14458 return IntRange::forValueOfType(C, T: GetExprType(E));
14459}
14460
14461static IntRange GetExprRange(ASTContext &C, const Expr *E,
14462 bool InConstantContext, bool Approximate) {
14463 return GetExprRange(C, E, MaxWidth: C.getIntWidth(T: GetExprType(E)), InConstantContext,
14464 Approximate);
14465}
14466
14467/// Checks whether the given value, which currently has the given
14468/// source semantics, has the same value when coerced through the
14469/// target semantics.
14470static bool IsSameFloatAfterCast(const llvm::APFloat &value,
14471 const llvm::fltSemantics &Src,
14472 const llvm::fltSemantics &Tgt) {
14473 llvm::APFloat truncated = value;
14474
14475 bool ignored;
14476 truncated.convert(ToSemantics: Src, RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &ignored);
14477 truncated.convert(ToSemantics: Tgt, RM: llvm::APFloat::rmNearestTiesToEven, losesInfo: &ignored);
14478
14479 return truncated.bitwiseIsEqual(RHS: value);
14480}
14481
14482/// Checks whether the given value, which currently has the given
14483/// source semantics, has the same value when coerced through the
14484/// target semantics.
14485///
14486/// The value might be a vector of floats (or a complex number).
14487static bool IsSameFloatAfterCast(const APValue &value,
14488 const llvm::fltSemantics &Src,
14489 const llvm::fltSemantics &Tgt) {
14490 if (value.isFloat())
14491 return IsSameFloatAfterCast(value: value.getFloat(), Src, Tgt);
14492
14493 if (value.isVector()) {
14494 for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
14495 if (!IsSameFloatAfterCast(value: value.getVectorElt(I: i), Src, Tgt))
14496 return false;
14497 return true;
14498 }
14499
14500 assert(value.isComplexFloat());
14501 return (IsSameFloatAfterCast(value: value.getComplexFloatReal(), Src, Tgt) &&
14502 IsSameFloatAfterCast(value: value.getComplexFloatImag(), Src, Tgt));
14503}
14504
14505static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC,
14506 bool IsListInit = false);
14507
14508static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) {
14509 // Suppress cases where we are comparing against an enum constant.
14510 if (const DeclRefExpr *DR =
14511 dyn_cast<DeclRefExpr>(Val: E->IgnoreParenImpCasts()))
14512 if (isa<EnumConstantDecl>(Val: DR->getDecl()))
14513 return true;
14514
14515 // Suppress cases where the value is expanded from a macro, unless that macro
14516 // is how a language represents a boolean literal. This is the case in both C
14517 // and Objective-C.
14518 SourceLocation BeginLoc = E->getBeginLoc();
14519 if (BeginLoc.isMacroID()) {
14520 StringRef MacroName = Lexer::getImmediateMacroName(
14521 Loc: BeginLoc, SM: S.getSourceManager(), LangOpts: S.getLangOpts());
14522 return MacroName != "YES" && MacroName != "NO" &&
14523 MacroName != "true" && MacroName != "false";
14524 }
14525
14526 return false;
14527}
14528
14529static bool isKnownToHaveUnsignedValue(Expr *E) {
14530 return E->getType()->isIntegerType() &&
14531 (!E->getType()->isSignedIntegerType() ||
14532 !E->IgnoreParenImpCasts()->getType()->isSignedIntegerType());
14533}
14534
14535namespace {
14536/// The promoted range of values of a type. In general this has the
14537/// following structure:
14538///
14539/// |-----------| . . . |-----------|
14540/// ^ ^ ^ ^
14541/// Min HoleMin HoleMax Max
14542///
14543/// ... where there is only a hole if a signed type is promoted to unsigned
14544/// (in which case Min and Max are the smallest and largest representable
14545/// values).
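///
/// For instance (illustrative, assuming 32-bit int): promoting the range of
/// 'signed char' to 'unsigned int' maps [-128, -1] onto the top of the
/// unsigned range, so the promoted set is [0, 127] together with
/// [UINT_MAX - 127, UINT_MAX], with a hole in between.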
14546struct PromotedRange {
14547 // Min, or HoleMax if there is a hole.
14548 llvm::APSInt PromotedMin;
14549 // Max, or HoleMin if there is a hole.
14550 llvm::APSInt PromotedMax;
14551
14552 PromotedRange(IntRange R, unsigned BitWidth, bool Unsigned) {
14553 if (R.Width == 0)
14554 PromotedMin = PromotedMax = llvm::APSInt(BitWidth, Unsigned);
14555 else if (R.Width >= BitWidth && !Unsigned) {
14556 // Promotion made the type *narrower*. This happens when promoting
14557 // a < 32-bit unsigned / <= 32-bit signed bit-field to 'signed int'.
14558 // Treat all values of 'signed int' as being in range for now.
14559 PromotedMin = llvm::APSInt::getMinValue(numBits: BitWidth, Unsigned);
14560 PromotedMax = llvm::APSInt::getMaxValue(numBits: BitWidth, Unsigned);
14561 } else {
14562 PromotedMin = llvm::APSInt::getMinValue(numBits: R.Width, Unsigned: R.NonNegative)
14563 .extOrTrunc(width: BitWidth);
14564 PromotedMin.setIsUnsigned(Unsigned);
14565
14566 PromotedMax = llvm::APSInt::getMaxValue(numBits: R.Width, Unsigned: R.NonNegative)
14567 .extOrTrunc(width: BitWidth);
14568 PromotedMax.setIsUnsigned(Unsigned);
14569 }
14570 }
14571
14572 // Determine whether this range is contiguous (has no hole).
14573 bool isContiguous() const { return PromotedMin <= PromotedMax; }
14574
14575 // Where a constant value is within the range.
14576 enum ComparisonResult {
14577 LT = 0x1,
14578 LE = 0x2,
14579 GT = 0x4,
14580 GE = 0x8,
14581 EQ = 0x10,
14582 NE = 0x20,
14583 InRangeFlag = 0x40,
14584
14585 Less = LE | LT | NE,
14586 Min = LE | InRangeFlag,
14587 InRange = InRangeFlag,
14588 Max = GE | InRangeFlag,
14589 Greater = GE | GT | NE,
14590
14591 OnlyValue = LE | GE | EQ | InRangeFlag,
14592 InHole = NE
14593 };
14594
14595 ComparisonResult compare(const llvm::APSInt &Value) const {
14596 assert(Value.getBitWidth() == PromotedMin.getBitWidth() &&
14597 Value.isUnsigned() == PromotedMin.isUnsigned());
14598 if (!isContiguous()) {
14599 assert(Value.isUnsigned() && "discontiguous range for signed compare");
14600 if (Value.isMinValue()) return Min;
14601 if (Value.isMaxValue()) return Max;
14602 if (Value >= PromotedMin) return InRange;
14603 if (Value <= PromotedMax) return InRange;
14604 return InHole;
14605 }
14606
14607 switch (llvm::APSInt::compareValues(I1: Value, I2: PromotedMin)) {
14608 case -1: return Less;
14609 case 0: return PromotedMin == PromotedMax ? OnlyValue : Min;
14610 case 1:
14611 switch (llvm::APSInt::compareValues(I1: Value, I2: PromotedMax)) {
14612 case -1: return InRange;
14613 case 0: return Max;
14614 case 1: return Greater;
14615 }
14616 }
14617
14618 llvm_unreachable("impossible compare result");
14619 }
14620
14621 static std::optional<StringRef>
14622 constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) {
14623 if (Op == BO_Cmp) {
14624 ComparisonResult LTFlag = LT, GTFlag = GT;
14625 if (ConstantOnRHS) std::swap(a&: LTFlag, b&: GTFlag);
14626
14627 if (R & EQ) return StringRef("'std::strong_ordering::equal'");
14628 if (R & LTFlag) return StringRef("'std::strong_ordering::less'");
14629 if (R & GTFlag) return StringRef("'std::strong_ordering::greater'");
14630 return std::nullopt;
14631 }
14632
14633 ComparisonResult TrueFlag, FalseFlag;
14634 if (Op == BO_EQ) {
14635 TrueFlag = EQ;
14636 FalseFlag = NE;
14637 } else if (Op == BO_NE) {
14638 TrueFlag = NE;
14639 FalseFlag = EQ;
14640 } else {
14641 if ((Op == BO_LT || Op == BO_GE) ^ ConstantOnRHS) {
14642 TrueFlag = LT;
14643 FalseFlag = GE;
14644 } else {
14645 TrueFlag = GT;
14646 FalseFlag = LE;
14647 }
14648 if (Op == BO_GE || Op == BO_LE)
14649 std::swap(a&: TrueFlag, b&: FalseFlag);
14650 }
14651 if (R & TrueFlag)
14652 return StringRef("true");
14653 if (R & FalseFlag)
14654 return StringRef("false");
14655 return std::nullopt;
14656 }
14657};
14658}
14659
14660static bool HasEnumType(Expr *E) {
14661 // Strip off implicit integral promotions.
14662 while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: E)) {
14663 if (ICE->getCastKind() != CK_IntegralCast &&
14664 ICE->getCastKind() != CK_NoOp)
14665 break;
14666 E = ICE->getSubExpr();
14667 }
14668
14669 return E->getType()->isEnumeralType();
14670}
14671
14672static int classifyConstantValue(Expr *Constant) {
14673 // The values of this enumeration are used in the diagnostics
14674 // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare.
14675 enum ConstantValueKind {
14676 Miscellaneous = 0,
14677 LiteralTrue,
14678 LiteralFalse
14679 };
14680 if (auto *BL = dyn_cast<CXXBoolLiteralExpr>(Val: Constant))
14681 return BL->getValue() ? ConstantValueKind::LiteralTrue
14682 : ConstantValueKind::LiteralFalse;
14683 return ConstantValueKind::Miscellaneous;
14684}
14685
14686static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
14687 Expr *Constant, Expr *Other,
14688 const llvm::APSInt &Value,
14689 bool RhsConstant) {
14690 if (S.inTemplateInstantiation())
14691 return false;
14692
14693 Expr *OriginalOther = Other;
14694
14695 Constant = Constant->IgnoreParenImpCasts();
14696 Other = Other->IgnoreParenImpCasts();
14697
14698 // Suppress warnings on tautological comparisons between values of the same
14699 // enumeration type. There are only two ways we could warn on this:
14700 // - If the constant is outside the range of representable values of
14701 // the enumeration. In such a case, we should warn about the cast
14702 // to enumeration type, not about the comparison.
14703 // - If the constant is the maximum / minimum in-range value. For an
14704  //     enumeration type, such comparisons can be meaningful and useful.
14705 if (Constant->getType()->isEnumeralType() &&
14706 S.Context.hasSameUnqualifiedType(T1: Constant->getType(), T2: Other->getType()))
14707 return false;
14708
14709 IntRange OtherValueRange = GetExprRange(
14710 C&: S.Context, E: Other, InConstantContext: S.isConstantEvaluatedContext(), /*Approximate=*/false);
14711
14712 QualType OtherT = Other->getType();
14713 if (const auto *AT = OtherT->getAs<AtomicType>())
14714 OtherT = AT->getValueType();
14715 IntRange OtherTypeRange = IntRange::forValueOfType(C&: S.Context, T: OtherT);
14716
14717  // Special case for ObjC BOOL on targets where it's a typedef for a signed char
14718  // (namely, macOS). FIXME: IntRange::forValueOfType should do this.
14719 bool IsObjCSignedCharBool = S.getLangOpts().ObjC &&
14720 S.NSAPIObj->isObjCBOOLType(T: OtherT) &&
14721 OtherT->isSpecificBuiltinType(K: BuiltinType::SChar);
14722
14723 // Whether we're treating Other as being a bool because of the form of
14724 // expression despite it having another type (typically 'int' in C).
14725 bool OtherIsBooleanDespiteType =
14726 !OtherT->isBooleanType() && Other->isKnownToHaveBooleanValue();
14727 if (OtherIsBooleanDespiteType || IsObjCSignedCharBool)
14728 OtherTypeRange = OtherValueRange = IntRange::forBoolType();
14729
14730 // Check if all values in the range of possible values of this expression
14731 // lead to the same comparison outcome.
14732 PromotedRange OtherPromotedValueRange(OtherValueRange, Value.getBitWidth(),
14733 Value.isUnsigned());
14734 auto Cmp = OtherPromotedValueRange.compare(Value);
14735 auto Result = PromotedRange::constantValue(Op: E->getOpcode(), R: Cmp, ConstantOnRHS: RhsConstant);
14736 if (!Result)
14737 return false;
14738
14739 // Also consider the range determined by the type alone. This allows us to
14740 // classify the warning under the proper diagnostic group.
14741 bool TautologicalTypeCompare = false;
14742 {
14743 PromotedRange OtherPromotedTypeRange(OtherTypeRange, Value.getBitWidth(),
14744 Value.isUnsigned());
14745 auto TypeCmp = OtherPromotedTypeRange.compare(Value);
14746 if (auto TypeResult = PromotedRange::constantValue(Op: E->getOpcode(), R: TypeCmp,
14747 ConstantOnRHS: RhsConstant)) {
14748 TautologicalTypeCompare = true;
14749 Cmp = TypeCmp;
14750 Result = TypeResult;
14751 }
14752 }
14753
14754 // Don't warn if the non-constant operand actually always evaluates to the
14755 // same value.
14756 if (!TautologicalTypeCompare && OtherValueRange.Width == 0)
14757 return false;
14758
14759 // Suppress the diagnostic for an in-range comparison if the constant comes
14760 // from a macro or enumerator. We don't want to diagnose
14761 //
14762 // some_long_value <= INT_MAX
14763 //
14764 // when sizeof(int) == sizeof(long).
14765 bool InRange = Cmp & PromotedRange::InRangeFlag;
14766 if (InRange && IsEnumConstOrFromMacro(S, E: Constant))
14767 return false;
14768
14769 // A comparison of an unsigned bit-field against 0 is really a type problem,
14770 // even though at the type level the bit-field might promote to 'signed int'.
14771 if (Other->refersToBitField() && InRange && Value == 0 &&
14772 Other->getType()->isUnsignedIntegerOrEnumerationType())
14773 TautologicalTypeCompare = true;
14774
14775 // If this is a comparison to an enum constant, include that
14776 // constant in the diagnostic.
14777 const EnumConstantDecl *ED = nullptr;
14778 if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Val: Constant))
14779 ED = dyn_cast<EnumConstantDecl>(Val: DR->getDecl());
14780
14781 // Should be enough for uint128 (39 decimal digits)
14782 SmallString<64> PrettySourceValue;
14783 llvm::raw_svector_ostream OS(PrettySourceValue);
14784 if (ED) {
14785 OS << '\'' << *ED << "' (" << Value << ")";
14786 } else if (auto *BL = dyn_cast<ObjCBoolLiteralExpr>(
14787 Val: Constant->IgnoreParenImpCasts())) {
14788 OS << (BL->getValue() ? "YES" : "NO");
14789 } else {
14790 OS << Value;
14791 }
14792
14793 if (!TautologicalTypeCompare) {
14794 S.Diag(E->getOperatorLoc(), diag::warn_tautological_compare_value_range)
14795 << RhsConstant << OtherValueRange.Width << OtherValueRange.NonNegative
14796 << E->getOpcodeStr() << OS.str() << *Result
14797 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
14798 return true;
14799 }
14800
14801 if (IsObjCSignedCharBool) {
14802 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
14803 S.PDiag(diag::warn_tautological_compare_objc_bool)
14804 << OS.str() << *Result);
14805 return true;
14806 }
14807
14808 // FIXME: We use a somewhat different formatting for the in-range cases and
14809 // cases involving boolean values for historical reasons. We should pick a
14810 // consistent way of presenting these diagnostics.
14811 if (!InRange || Other->isKnownToHaveBooleanValue()) {
14812
14813 S.DiagRuntimeBehavior(
14814 E->getOperatorLoc(), E,
14815 S.PDiag(!InRange ? diag::warn_out_of_range_compare
14816 : diag::warn_tautological_bool_compare)
14817 << OS.str() << classifyConstantValue(Constant) << OtherT
14818 << OtherIsBooleanDespiteType << *Result
14819 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
14820 } else {
14821 bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
14822 unsigned Diag =
14823 (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
14824 ? (HasEnumType(OriginalOther)
14825 ? diag::warn_unsigned_enum_always_true_comparison
14826 : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
14827 : diag::warn_unsigned_always_true_comparison)
14828 : diag::warn_tautological_constant_compare;
14829
14830 S.Diag(Loc: E->getOperatorLoc(), DiagID: Diag)
14831 << RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
14832 << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
14833 }
14834
14835 return true;
14836}
14837
14838/// Analyze the operands of the given comparison. Implements the
14839/// fallback case from AnalyzeComparison.
14840static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
14841 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
14842 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
14843}
14844
14845/// Implements -Wsign-compare.
14846///
14847/// \param E the binary operator to check for warnings
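///
/// A typical instance (hypothetical code):
///
///   unsigned U = ...; int I = ...;
///   if (I < U) { ... }   // 'I' is converted to unsigned for the comparison
///
/// No warning is emitted when the signed operand is provably non-negative.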
14848static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
14849 // The type the comparison is being performed in.
14850 QualType T = E->getLHS()->getType();
14851
14852 // Only analyze comparison operators where both sides have been converted to
14853 // the same type.
14854 if (!S.Context.hasSameUnqualifiedType(T1: T, T2: E->getRHS()->getType()))
14855 return AnalyzeImpConvsInComparison(S, E);
14856
14857 // Don't analyze value-dependent comparisons directly.
14858 if (E->isValueDependent())
14859 return AnalyzeImpConvsInComparison(S, E);
14860
14861 Expr *LHS = E->getLHS();
14862 Expr *RHS = E->getRHS();
14863
14864 if (T->isIntegralType(Ctx: S.Context)) {
14865 std::optional<llvm::APSInt> RHSValue =
14866 RHS->getIntegerConstantExpr(Ctx: S.Context);
14867 std::optional<llvm::APSInt> LHSValue =
14868 LHS->getIntegerConstantExpr(Ctx: S.Context);
14869
14870 // We don't care about expressions whose result is a constant.
14871 if (RHSValue && LHSValue)
14872 return AnalyzeImpConvsInComparison(S, E);
14873
14874    // We only care about expressions where just one side is a literal.
14875 if ((bool)RHSValue ^ (bool)LHSValue) {
14876 // Is the constant on the RHS or LHS?
14877 const bool RhsConstant = (bool)RHSValue;
14878 Expr *Const = RhsConstant ? RHS : LHS;
14879 Expr *Other = RhsConstant ? LHS : RHS;
14880 const llvm::APSInt &Value = RhsConstant ? *RHSValue : *LHSValue;
14881
14882 // Check whether an integer constant comparison results in a value
14883 // of 'true' or 'false'.
14884 if (CheckTautologicalComparison(S, E, Constant: Const, Other, Value, RhsConstant))
14885 return AnalyzeImpConvsInComparison(S, E);
14886 }
14887 }
14888
14889 if (!T->hasUnsignedIntegerRepresentation()) {
14890 // We don't do anything special if this isn't an unsigned integral
14891 // comparison: we're only interested in integral comparisons, and
14892 // signed comparisons only happen in cases we don't care to warn about.
14893 return AnalyzeImpConvsInComparison(S, E);
14894 }
14895
14896 LHS = LHS->IgnoreParenImpCasts();
14897 RHS = RHS->IgnoreParenImpCasts();
14898
14899 if (!S.getLangOpts().CPlusPlus) {
14900 // Avoid warning about comparison of integers with different signs when
14901 // RHS/LHS has a `typeof(E)` type whose sign is different from the sign of
14902 // the type of `E`.
14903 if (const auto *TET = dyn_cast<TypeOfExprType>(Val: LHS->getType()))
14904 LHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
14905 if (const auto *TET = dyn_cast<TypeOfExprType>(Val: RHS->getType()))
14906 RHS = TET->getUnderlyingExpr()->IgnoreParenImpCasts();
14907 }
14908
14909 // Check to see if one of the (unmodified) operands is of different
14910 // signedness.
14911 Expr *signedOperand, *unsignedOperand;
14912 if (LHS->getType()->hasSignedIntegerRepresentation()) {
14913 assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
14914 "unsigned comparison between two signed integer expressions?");
14915 signedOperand = LHS;
14916 unsignedOperand = RHS;
14917 } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
14918 signedOperand = RHS;
14919 unsignedOperand = LHS;
14920 } else {
14921 return AnalyzeImpConvsInComparison(S, E);
14922 }
14923
14924 // Otherwise, calculate the effective range of the signed operand.
14925 IntRange signedRange =
14926 GetExprRange(C&: S.Context, E: signedOperand, InConstantContext: S.isConstantEvaluatedContext(),
14927 /*Approximate=*/true);
14928
14929 // Go ahead and analyze implicit conversions in the operands. Note
14930 // that we skip the implicit conversions on both sides.
14931 AnalyzeImplicitConversions(S, E: LHS, CC: E->getOperatorLoc());
14932 AnalyzeImplicitConversions(S, E: RHS, CC: E->getOperatorLoc());
14933
14934 // If the signed range is non-negative, -Wsign-compare won't fire.
14935 if (signedRange.NonNegative)
14936 return;
14937
14938 // For (in)equality comparisons, if the unsigned operand is a
14939 // constant which cannot collide with an overflowed signed operand,
14940 // then reinterpreting the signed operand as unsigned will not
14941 // change the result of the comparison.
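  // For example (illustrative): in 'i == 4u' with 'int i', the constant 4
  // needs far fewer bits than the 32-bit comparison, so no negative value of
  // 'i' can wrap around to 4 and the warning would only be noise.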
14942 if (E->isEqualityOp()) {
14943 unsigned comparisonWidth = S.Context.getIntWidth(T);
14944 IntRange unsignedRange =
14945 GetExprRange(C&: S.Context, E: unsignedOperand, InConstantContext: S.isConstantEvaluatedContext(),
14946 /*Approximate=*/true);
14947
14948 // We should never be unable to prove that the unsigned operand is
14949 // non-negative.
14950 assert(unsignedRange.NonNegative && "unsigned range includes negative?");
14951
14952 if (unsignedRange.Width < comparisonWidth)
14953 return;
14954 }
14955
14956 S.DiagRuntimeBehavior(E->getOperatorLoc(), E,
14957 S.PDiag(diag::warn_mixed_sign_comparison)
14958 << LHS->getType() << RHS->getType()
14959 << LHS->getSourceRange() << RHS->getSourceRange());
14960}
14961
14962/// Analyzes an attempt to assign the given value to a bitfield.
14963///
14964/// Returns true if there was something fishy about the attempt.
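///
/// Illustrative example (assumed, not from the test suite): assigning the
/// constant 5 to a two-bit signed bit-field stores 1, so the truncation
/// warning fires.
///
///   struct S { int b : 2; };
///   void f(struct S *s) { s->b = 5; }  // changes value from 5 to 1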
14965static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
14966 SourceLocation InitLoc) {
14967 assert(Bitfield->isBitField());
14968 if (Bitfield->isInvalidDecl())
14969 return false;
14970
14971 // White-list bool bitfields.
14972 QualType BitfieldType = Bitfield->getType();
14973 if (BitfieldType->isBooleanType())
14974 return false;
14975
14976 if (BitfieldType->isEnumeralType()) {
14977 EnumDecl *BitfieldEnumDecl = BitfieldType->castAs<EnumType>()->getDecl();
14978 // If the underlying enum type was not explicitly specified as an unsigned
14979 // type and the enum contains only positive values, MSVC++ will cause an
14980 // inconsistency by storing this as a signed type.
14981 if (S.getLangOpts().CPlusPlus11 &&
14982 !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
14983 BitfieldEnumDecl->getNumPositiveBits() > 0 &&
14984 BitfieldEnumDecl->getNumNegativeBits() == 0) {
14985 S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
14986 << BitfieldEnumDecl;
14987 }
14988 }
14989
14990 // Ignore value- or type-dependent expressions.
14991 if (Bitfield->getBitWidth()->isValueDependent() ||
14992 Bitfield->getBitWidth()->isTypeDependent() ||
14993 Init->isValueDependent() ||
14994 Init->isTypeDependent())
14995 return false;
14996
14997 Expr *OriginalInit = Init->IgnoreParenImpCasts();
14998 unsigned FieldWidth = Bitfield->getBitWidthValue(Ctx: S.Context);
14999
15000 Expr::EvalResult Result;
15001 if (!OriginalInit->EvaluateAsInt(Result, Ctx: S.Context,
15002 AllowSideEffects: Expr::SE_AllowSideEffects)) {
15003 // The RHS is not constant. If the RHS has an enum type, make sure the
15004 // bitfield is wide enough to hold all the values of the enum without
15005 // truncation.
15006 if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
15007 EnumDecl *ED = EnumTy->getDecl();
15008 bool SignedBitfield = BitfieldType->isSignedIntegerType();
15009
15010 // Enum types are implicitly signed on Windows, so check if there are any
15011 // negative enumerators to see if the enum was intended to be signed or
15012 // not.
15013 bool SignedEnum = ED->getNumNegativeBits() > 0;
15014
15015 // Check for surprising sign changes when assigning enum values to a
15016 // bitfield of different signedness. If the bitfield is signed and we
15017 // have exactly the right number of bits to store this unsigned enum,
15018 // suggest changing the enum to an unsigned type. This typically happens
15019 // on Windows where unfixed enums always use an underlying type of 'int'.
15020 unsigned DiagID = 0;
15021 if (SignedEnum && !SignedBitfield) {
15022 DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
15023 } else if (SignedBitfield && !SignedEnum &&
15024 ED->getNumPositiveBits() == FieldWidth) {
15025 DiagID = diag::warn_signed_bitfield_enum_conversion;
15026 }
15027
15028 if (DiagID) {
15029 S.Diag(Loc: InitLoc, DiagID) << Bitfield << ED;
15030 TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
15031 SourceRange TypeRange =
15032 TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
15033 S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
15034 << SignedEnum << TypeRange;
15035 }
15036
15037 // Compute the required bitwidth. If the enum has negative values, we need
15038 // one more bit than the normal number of positive bits to represent the
15039 // sign bit.
15040 unsigned BitsNeeded = SignedEnum ? std::max(a: ED->getNumPositiveBits() + 1,
15041 b: ED->getNumNegativeBits())
15042 : ED->getNumPositiveBits();
15043
15044 // Check the bitwidth.
15045 if (BitsNeeded > FieldWidth) {
15046 Expr *WidthExpr = Bitfield->getBitWidth();
15047 S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
15048 << Bitfield << ED;
15049 S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
15050 << BitsNeeded << ED << WidthExpr->getSourceRange();
15051 }
15052 }
15053
15054 return false;
15055 }
15056
15057 llvm::APSInt Value = Result.Val.getInt();
15058
15059 unsigned OriginalWidth = Value.getBitWidth();
15060
15061 // In C, the macro 'true' from stdbool.h will evaluate to '1'. To reduce
15062 // false positives where the user is demonstrating they intend to use the
15063 // bit-field as a Boolean, check whether a value of 1 assigned to a one-bit
15064 // bit-field came from a macro named 'true'.
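  // A minimal sketch of the pattern being allowed (illustrative, C only):
  //   struct S { int flag : 1; };
  //   s.flag = true;  // 'true' is 1 from <stdbool.h>; don't warn that a
  //                   // signed one-bit field stores it as -1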
15065 bool OneAssignedToOneBitBitfield = FieldWidth == 1 && Value == 1;
15066 if (OneAssignedToOneBitBitfield && !S.LangOpts.CPlusPlus) {
15067 SourceLocation MaybeMacroLoc = OriginalInit->getBeginLoc();
15068 if (S.SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
15069 S.findMacroSpelling(loc&: MaybeMacroLoc, name: "true"))
15070 return false;
15071 }
15072
15073 if (!Value.isSigned() || Value.isNegative())
15074 if (UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: OriginalInit))
15075 if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
15076 OriginalWidth = Value.getSignificantBits();
15077
15078 if (OriginalWidth <= FieldWidth)
15079 return false;
15080
15081 // Compute the value which the bitfield will contain.
15082 llvm::APSInt TruncatedValue = Value.trunc(width: FieldWidth);
15083 TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
15084
15085 // Check whether the stored value is equal to the original value.
15086 TruncatedValue = TruncatedValue.extend(width: OriginalWidth);
15087 if (llvm::APSInt::isSameValue(I1: Value, I2: TruncatedValue))
15088 return false;
15089
15090 std::string PrettyValue = toString(I: Value, Radix: 10);
15091 std::string PrettyTrunc = toString(I: TruncatedValue, Radix: 10);
15092
15093 S.Diag(InitLoc, OneAssignedToOneBitBitfield
15094 ? diag::warn_impcast_single_bit_bitield_precision_constant
15095 : diag::warn_impcast_bitfield_precision_constant)
15096 << PrettyValue << PrettyTrunc << OriginalInit->getType()
15097 << Init->getSourceRange();
15098
15099 return true;
15100}
15101
15102/// Analyze the given simple or compound assignment for warning-worthy
15103/// operations.
15104static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
15105 // Just recurse on the LHS.
15106 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
15107
15108 // We want to recurse on the RHS as normal unless we're assigning to
15109 // a bitfield.
15110 if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) {
15111 if (AnalyzeBitFieldAssignment(S, Bitfield, Init: E->getRHS(),
15112 InitLoc: E->getOperatorLoc())) {
15113 // Recurse, ignoring any implicit conversions on the RHS.
15114 return AnalyzeImplicitConversions(S, E: E->getRHS()->IgnoreParenImpCasts(),
15115 CC: E->getOperatorLoc());
15116 }
15117 }
15118
15119 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
15120
15121 // Diagnose implicitly sequentially-consistent atomic assignment.
15122 if (E->getLHS()->getType()->isAtomicType())
15123 S.Diag(E->getRHS()->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
15124}
15125
15126/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
15127static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
15128 SourceLocation CContext, unsigned diag,
15129 bool pruneControlFlow = false) {
15130 if (pruneControlFlow) {
15131 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15132 S.PDiag(DiagID: diag)
15133 << SourceType << T << E->getSourceRange()
15134 << SourceRange(CContext));
15135 return;
15136 }
15137 S.Diag(Loc: E->getExprLoc(), DiagID: diag)
15138 << SourceType << T << E->getSourceRange() << SourceRange(CContext);
15139}
15140
15141/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
15142static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
15143 SourceLocation CContext,
15144 unsigned diag, bool pruneControlFlow = false) {
15145 DiagnoseImpCast(S, E, SourceType: E->getType(), T, CContext, diag, pruneControlFlow);
15146}
15147
15148static bool isObjCSignedCharBool(Sema &S, QualType Ty) {
15149 return Ty->isSpecificBuiltinType(K: BuiltinType::SChar) &&
15150 S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(T: Ty);
15151}
15152
15153static void adornObjCBoolConversionDiagWithTernaryFixit(
15154 Sema &S, Expr *SourceExpr, const Sema::SemaDiagnosticBuilder &Builder) {
15155 Expr *Ignored = SourceExpr->IgnoreImplicit();
15156 if (const auto *OVE = dyn_cast<OpaqueValueExpr>(Val: Ignored))
15157 Ignored = OVE->getSourceExpr();
15158 bool NeedsParens = isa<AbstractConditionalOperator>(Val: Ignored) ||
15159 isa<BinaryOperator>(Val: Ignored) ||
15160 isa<CXXOperatorCallExpr>(Val: Ignored);
15161 SourceLocation EndLoc = S.getLocForEndOfToken(Loc: SourceExpr->getEndLoc());
15162 if (NeedsParens)
15163 Builder << FixItHint::CreateInsertion(InsertionLoc: SourceExpr->getBeginLoc(), Code: "(")
15164 << FixItHint::CreateInsertion(InsertionLoc: EndLoc, Code: ")");
15165 Builder << FixItHint::CreateInsertion(InsertionLoc: EndLoc, Code: " ? YES : NO");
15166}
15167
15168/// Diagnose an implicit cast from a floating point value to an integer value.
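///
/// Illustrative cases (assumed, not from the test suite):
///
///   int i = 3.14;     // changes value from 3.14 to 3
///   int j = 2.0e10f;  // out of range for 'int'; the conversion is undefined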
15169static void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
15170 SourceLocation CContext) {
15171 const bool IsBool = T->isSpecificBuiltinType(K: BuiltinType::Bool);
15172 const bool PruneWarnings = S.inTemplateInstantiation();
15173
15174 Expr *InnerE = E->IgnoreParenImpCasts();
15175 // We also want to warn on, e.g., "int i = -1.234"
15176 if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(Val: InnerE))
15177 if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
15178 InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
15179
15180 const bool IsLiteral =
15181 isa<FloatingLiteral>(Val: E) || isa<FloatingLiteral>(Val: InnerE);
15182
15183 llvm::APFloat Value(0.0);
15184 bool IsConstant =
15185 E->EvaluateAsFloat(Result&: Value, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects);
15186 if (!IsConstant) {
15187 if (isObjCSignedCharBool(S, Ty: T)) {
15188 return adornObjCBoolConversionDiagWithTernaryFixit(
15189 S, E,
15190 S.Diag(CContext, diag::warn_impcast_float_to_objc_signed_char_bool)
15191 << E->getType());
15192 }
15193
15194 return DiagnoseImpCast(S, E, T, CContext,
15195 diag::warn_impcast_float_integer, PruneWarnings);
15196 }
15197
15198 bool isExact = false;
15199
15200 llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
15201 T->hasUnsignedIntegerRepresentation());
15202 llvm::APFloat::opStatus Result = Value.convertToInteger(
15203 Result&: IntegerValue, RM: llvm::APFloat::rmTowardZero, IsExact: &isExact);
15204
15205 // FIXME: Force the precision of the source value down so we don't print
15206 // digits which are usually useless (we don't really care here if we
15207 // truncate a digit by accident in edge cases). Ideally, APFloat::toString
15208 // would automatically print the shortest representation, but it's a bit
15209 // tricky to implement.
15210 SmallString<16> PrettySourceValue;
15211 unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
15212 precision = (precision * 59 + 195) / 196;
15213 Value.toString(Str&: PrettySourceValue, FormatPrecision: precision);
15214
15215 if (isObjCSignedCharBool(S, Ty: T) && IntegerValue != 0 && IntegerValue != 1) {
15216 return adornObjCBoolConversionDiagWithTernaryFixit(
15217 S, E,
15218 S.Diag(CContext, diag::warn_impcast_constant_value_to_objc_bool)
15219 << PrettySourceValue);
15220 }
15221
15222 if (Result == llvm::APFloat::opOK && isExact) {
15223 if (IsLiteral) return;
15224 return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer,
15225 PruneWarnings);
15226 }
15227
15228 // Conversion of a floating-point value to a non-bool integer where the
15229 // integral part cannot be represented by the integer type is undefined.
15230 if (!IsBool && Result == llvm::APFloat::opInvalidOp)
15231 return DiagnoseImpCast(
15232 S, E, T, CContext,
15233 IsLiteral ? diag::warn_impcast_literal_float_to_integer_out_of_range
15234 : diag::warn_impcast_float_to_integer_out_of_range,
15235 PruneWarnings);
15236
15237 unsigned DiagID = 0;
15238 if (IsLiteral) {
15239 // Warn on floating point literal to integer.
15240 DiagID = diag::warn_impcast_literal_float_to_integer;
15241 } else if (IntegerValue == 0) {
15242 if (Value.isZero()) { // Skip -0.0 to 0 conversion.
15243 return DiagnoseImpCast(S, E, T, CContext,
15244 diag::warn_impcast_float_integer, PruneWarnings);
15245 }
15246 // Warn on non-zero to zero conversion.
15247 DiagID = diag::warn_impcast_float_to_integer_zero;
15248 } else {
15249 if (IntegerValue.isUnsigned()) {
15250 if (!IntegerValue.isMaxValue()) {
15251 return DiagnoseImpCast(S, E, T, CContext,
15252 diag::warn_impcast_float_integer, PruneWarnings);
15253 }
15254 } else { // IntegerValue.isSigned()
15255 if (!IntegerValue.isMaxSignedValue() &&
15256 !IntegerValue.isMinSignedValue()) {
15257 return DiagnoseImpCast(S, E, T, CContext,
15258 diag::warn_impcast_float_integer, PruneWarnings);
15259 }
15260 }
15261 // Warn on evaluatable floating point expression to integer conversion.
15262 DiagID = diag::warn_impcast_float_to_integer;
15263 }
15264
15265 SmallString<16> PrettyTargetValue;
15266 if (IsBool)
15267 PrettyTargetValue = Value.isZero() ? "false" : "true";
15268 else
15269 IntegerValue.toString(Str&: PrettyTargetValue);
15270
15271 if (PruneWarnings) {
15272 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15273 S.PDiag(DiagID)
15274 << E->getType() << T.getUnqualifiedType()
15275 << PrettySourceValue << PrettyTargetValue
15276 << E->getSourceRange() << SourceRange(CContext));
15277 } else {
15278 S.Diag(Loc: E->getExprLoc(), DiagID)
15279 << E->getType() << T.getUnqualifiedType() << PrettySourceValue
15280 << PrettyTargetValue << E->getSourceRange() << SourceRange(CContext);
15281 }
15282}
15283
15284/// Analyze the given compound assignment for the possible losing of
15285/// floating-point precision.
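///
/// Illustrative cases (assumed, not from the test suite):
///
///   int i = 0; float f = 0; double d = 0.5;
///   i += d;  // the 'double' result is truncated back to 'int'
///   f += d;  // the result's precision drops from 'double' to 'float'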
15286static void AnalyzeCompoundAssignment(Sema &S, BinaryOperator *E) {
15287 assert(isa<CompoundAssignOperator>(E) &&
15288 "Must be compound assignment operation");
15289 // Recurse on the LHS and RHS in here
15290 AnalyzeImplicitConversions(S, E: E->getLHS(), CC: E->getOperatorLoc());
15291 AnalyzeImplicitConversions(S, E: E->getRHS(), CC: E->getOperatorLoc());
15292
15293 if (E->getLHS()->getType()->isAtomicType())
15294 S.Diag(E->getOperatorLoc(), diag::warn_atomic_implicit_seq_cst);
15295
15296 // Now check the outermost expression
15297 const auto *ResultBT = E->getLHS()->getType()->getAs<BuiltinType>();
15298 const auto *RBT = cast<CompoundAssignOperator>(Val: E)
15299 ->getComputationResultType()
15300 ->getAs<BuiltinType>();
15301
15302 // The below checks assume source is floating point.
15303 if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return;
15304
15305 // If source is floating point but target is an integer.
15306 if (ResultBT->isInteger())
15307 return DiagnoseImpCast(S, E, E->getRHS()->getType(), E->getLHS()->getType(),
15308 E->getExprLoc(), diag::warn_impcast_float_integer);
15309
15310 if (!ResultBT->isFloatingPoint())
15311 return;
15312
15313 // If both source and target are floating-point, warn about losing precision.
15314 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
15315 LHS: QualType(ResultBT, 0), RHS: QualType(RBT, 0));
15316 if (Order < 0 && !S.SourceMgr.isInSystemMacro(E->getOperatorLoc()))
15317 // warn about dropping FP rank.
15318 DiagnoseImpCast(S, E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc(),
15319 diag::warn_impcast_float_result_precision);
15320}
15321
15322static std::string PrettyPrintInRange(const llvm::APSInt &Value,
15323 IntRange Range) {
15324 if (!Range.Width) return "0";
15325
15326 llvm::APSInt ValueInRange = Value;
15327 ValueInRange.setIsSigned(!Range.NonNegative);
15328 ValueInRange = ValueInRange.trunc(width: Range.Width);
15329 return toString(I: ValueInRange, Radix: 10);
15330}
15331
15332static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
15333 if (!isa<ImplicitCastExpr>(Val: Ex))
15334 return false;
15335
15336 Expr *InnerE = Ex->IgnoreParenImpCasts();
15337 const Type *Target = S.Context.getCanonicalType(T: Ex->getType()).getTypePtr();
15338 const Type *Source =
15339 S.Context.getCanonicalType(T: InnerE->getType()).getTypePtr();
15340 if (Target->isDependentType())
15341 return false;
15342
15343 const BuiltinType *FloatCandidateBT =
15344 dyn_cast<BuiltinType>(Val: ToBool ? Source : Target);
15345 const Type *BoolCandidateType = ToBool ? Target : Source;
15346
15347 return (BoolCandidateType->isSpecificBuiltinType(K: BuiltinType::Bool) &&
15348 FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
15349}
15350
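// Check adjacent call arguments for paired implicit bool<->float conversions,
// which usually means the arguments were swapped. A minimal sketch of the kind
// of call this flags (illustrative; the names are made up):
//   void report(bool ok, double value);
//   report(3.0, ok);  // 'ok' and '3.0' are in the wrong order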
15351static void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
15352 SourceLocation CC) {
15353 unsigned NumArgs = TheCall->getNumArgs();
15354 for (unsigned i = 0; i < NumArgs; ++i) {
15355 Expr *CurrA = TheCall->getArg(Arg: i);
15356 if (!IsImplicitBoolFloatConversion(S, Ex: CurrA, ToBool: true))
15357 continue;
15358
15359 bool IsSwapped = ((i > 0) &&
15360 IsImplicitBoolFloatConversion(S, Ex: TheCall->getArg(Arg: i - 1), ToBool: false));
15361 IsSwapped |= ((i < (NumArgs - 1)) &&
15362 IsImplicitBoolFloatConversion(S, Ex: TheCall->getArg(Arg: i + 1), ToBool: false));
15363 if (IsSwapped) {
15364 // Warn on this floating-point to bool conversion.
15365 DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(),
15366 CurrA->getType(), CC,
15367 diag::warn_impcast_floating_point_to_bool);
15368 }
15369 }
15370}
15371
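// Diagnose implicit conversion of NULL or nullptr to an integer, e.g.
// (illustrative, C++): 'int fd = NULL;' warns, and the fix-it replaces NULL
// with a zero literal of the appropriate type.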
15372static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
15373 SourceLocation CC) {
15374 if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
15375 E->getExprLoc()))
15376 return;
15377
15378 // Don't warn on functions which have return type nullptr_t.
15379 if (isa<CallExpr>(Val: E))
15380 return;
15381
15382 // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
15383 const Expr *NewE = E->IgnoreParenImpCasts();
15384 bool IsGNUNullExpr = isa<GNUNullExpr>(Val: NewE);
15385 bool HasNullPtrType = NewE->getType()->isNullPtrType();
15386 if (!IsGNUNullExpr && !HasNullPtrType)
15387 return;
15388
15389 // Return if target type is a safe conversion.
15390 if (T->isAnyPointerType() || T->isBlockPointerType() ||
15391 T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType())
15392 return;
15393
15394 SourceLocation Loc = E->getSourceRange().getBegin();
15395
15396 // Venture through the macro stacks to get to the source of macro arguments.
15397 // The new location is a better location than the complete location that was
15398 // passed in.
15399 Loc = S.SourceMgr.getTopMacroCallerLoc(Loc);
15400 CC = S.SourceMgr.getTopMacroCallerLoc(Loc: CC);
15401
15402 // __null is usually wrapped in a macro. Go up a macro if that is the case.
15403 if (IsGNUNullExpr && Loc.isMacroID()) {
15404 StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
15405 Loc, SM: S.SourceMgr, LangOpts: S.getLangOpts());
15406 if (MacroName == "NULL")
15407 Loc = S.SourceMgr.getImmediateExpansionRange(Loc).getBegin();
15408 }
15409
15410 // Only warn if the null and context location are in the same macro expansion.
15411 if (S.SourceMgr.getFileID(SpellingLoc: Loc) != S.SourceMgr.getFileID(SpellingLoc: CC))
15412 return;
15413
15414 S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
15415 << HasNullPtrType << T << SourceRange(CC)
15416 << FixItHint::CreateReplacement(Loc,
15417 S.getFixItZeroLiteralForType(T, Loc));
15418}
15419
15420static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15421 ObjCArrayLiteral *ArrayLiteral);
15422
15423static void
15424checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
15425 ObjCDictionaryLiteral *DictionaryLiteral);
15426
15427/// Check a single element within a collection literal against the
15428/// target element type.
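///
/// Illustrative example (assumed): with a target of NSArray<NSString *> *,
/// the element in '@[ @1 ]' is an NSNumber rather than an NSString, so the
/// element warning below fires.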
15429static void checkObjCCollectionLiteralElement(Sema &S,
15430 QualType TargetElementType,
15431 Expr *Element,
15432 unsigned ElementKind) {
15433 // Skip a bitcast to 'id' or qualified 'id'.
15434 if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: Element)) {
15435 if (ICE->getCastKind() == CK_BitCast &&
15436 ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>())
15437 Element = ICE->getSubExpr();
15438 }
15439
15440 QualType ElementType = Element->getType();
15441 ExprResult ElementResult(Element);
15442 if (ElementType->getAs<ObjCObjectPointerType>() &&
15443 S.CheckSingleAssignmentConstraints(LHSType: TargetElementType,
15444 RHS&: ElementResult,
15445 Diagnose: false, DiagnoseCFAudited: false)
15446 != Sema::Compatible) {
15447 S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element)
15448 << ElementType << ElementKind << TargetElementType
15449 << Element->getSourceRange();
15450 }
15451
15452 if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Val: Element))
15453 checkObjCArrayLiteral(S, TargetType: TargetElementType, ArrayLiteral);
15454 else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Val: Element))
15455 checkObjCDictionaryLiteral(S, TargetType: TargetElementType, DictionaryLiteral);
15456}
15457
15458/// Check an Objective-C array literal being converted to the given
15459/// target type.
15460static void checkObjCArrayLiteral(Sema &S, QualType TargetType,
15461 ObjCArrayLiteral *ArrayLiteral) {
15462 if (!S.NSArrayDecl)
15463 return;
15464
15465 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
15466 if (!TargetObjCPtr)
15467 return;
15468
15469 if (TargetObjCPtr->isUnspecialized() ||
15470 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
15471 != S.NSArrayDecl->getCanonicalDecl())
15472 return;
15473
15474 auto TypeArgs = TargetObjCPtr->getTypeArgs();
15475 if (TypeArgs.size() != 1)
15476 return;
15477
15478 QualType TargetElementType = TypeArgs[0];
15479 for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) {
15480 checkObjCCollectionLiteralElement(S, TargetElementType,
15481 Element: ArrayLiteral->getElement(Index: I),
15482 ElementKind: 0);
15483 }
15484}
15485
15486/// Check an Objective-C dictionary literal being converted to the given
15487/// target type.
15488static void
15489checkObjCDictionaryLiteral(Sema &S, QualType TargetType,
15490 ObjCDictionaryLiteral *DictionaryLiteral) {
15491 if (!S.NSDictionaryDecl)
15492 return;
15493
15494 const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>();
15495 if (!TargetObjCPtr)
15496 return;
15497
15498 if (TargetObjCPtr->isUnspecialized() ||
15499 TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl()
15500 != S.NSDictionaryDecl->getCanonicalDecl())
15501 return;
15502
15503 auto TypeArgs = TargetObjCPtr->getTypeArgs();
15504 if (TypeArgs.size() != 2)
15505 return;
15506
15507 QualType TargetKeyType = TypeArgs[0];
15508 QualType TargetObjectType = TypeArgs[1];
15509 for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) {
15510 auto Element = DictionaryLiteral->getKeyValueElement(Index: I);
15511 checkObjCCollectionLiteralElement(S, TargetElementType: TargetKeyType, Element: Element.Key, ElementKind: 1);
15512 checkObjCCollectionLiteralElement(S, TargetElementType: TargetObjectType, Element: Element.Value, ElementKind: 2);
15513 }
15514}
15515
15516// Helper function to filter out cases for constant width constant conversion.
15517// Don't warn on char array initialization or for non-decimal values.
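// For example (illustrative, assuming plain 'char' is signed):
//   char a = 0xFF;  // hex constant: allowed to fill all bits, no warning
//   char b = 255;   // decimal constant: warns that the value becomes -1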
15518static bool isSameWidthConstantConversion(Sema &S, Expr *E, QualType T,
15519 SourceLocation CC) {
15520 // If initializing from a constant, and the constant starts with '0',
15521 // then it is a binary, octal, or hexadecimal. Allow these constants
15522 // to fill all the bits, even if there is a sign change.
15523 if (auto *IntLit = dyn_cast<IntegerLiteral>(Val: E->IgnoreParenImpCasts())) {
15524 const char FirstLiteralCharacter =
15525 S.getSourceManager().getCharacterData(SL: IntLit->getBeginLoc())[0];
15526 if (FirstLiteralCharacter == '0')
15527 return false;
15528 }
15529
15530 // If the CC location points to a '{', and the type is char, then assume
15531 // it is an array initialization.
15532 if (CC.isValid() && T->isCharType()) {
15533 const char FirstContextCharacter =
15534 S.getSourceManager().getCharacterData(SL: CC)[0];
15535 if (FirstContextCharacter == '{')
15536 return false;
15537 }
15538
15539 return true;
15540}
15541
15542static const IntegerLiteral *getIntegerLiteral(Expr *E) {
15543 const auto *IL = dyn_cast<IntegerLiteral>(Val: E);
15544 if (!IL) {
15545 if (auto *UO = dyn_cast<UnaryOperator>(Val: E)) {
15546 if (UO->getOpcode() == UO_Minus)
15547 return dyn_cast<IntegerLiteral>(Val: UO->getSubExpr());
15548 }
15549 }
15550
15551 return IL;
15552}
15553
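// Warn about integer expressions whose value is effectively fixed once they
// are used in a boolean context, e.g. (illustrative):
//   if (x << 4) {}      // signed '<<' used in a boolean context
//   if (b ? 4 : 8) {}   // both arms are nonzero, so this is always true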
15554static void DiagnoseIntInBoolContext(Sema &S, Expr *E) {
15555 E = E->IgnoreParenImpCasts();
15556 SourceLocation ExprLoc = E->getExprLoc();
15557
15558 if (const auto *BO = dyn_cast<BinaryOperator>(Val: E)) {
15559 BinaryOperator::Opcode Opc = BO->getOpcode();
15560 Expr::EvalResult Result;
15561 // Do not diagnose unsigned shifts.
15562 if (Opc == BO_Shl) {
15563 const auto *LHS = getIntegerLiteral(E: BO->getLHS());
15564 const auto *RHS = getIntegerLiteral(E: BO->getRHS());
15565 if (LHS && LHS->getValue() == 0)
15566 S.Diag(ExprLoc, diag::warn_left_shift_always) << 0;
15567 else if (!E->isValueDependent() && LHS && RHS &&
15568 RHS->getValue().isNonNegative() &&
15569 E->EvaluateAsInt(Result, S.Context, Expr::SE_AllowSideEffects))
15570 S.Diag(ExprLoc, diag::warn_left_shift_always)
15571 << (Result.Val.getInt() != 0);
15572 else if (E->getType()->isSignedIntegerType())
15573 S.Diag(ExprLoc, diag::warn_left_shift_in_bool_context) << E;
15574 }
15575 }
15576
15577 if (const auto *CO = dyn_cast<ConditionalOperator>(Val: E)) {
15578 const auto *LHS = getIntegerLiteral(E: CO->getTrueExpr());
15579 const auto *RHS = getIntegerLiteral(E: CO->getFalseExpr());
15580 if (!LHS || !RHS)
15581 return;
15582 if ((LHS->getValue() == 0 || LHS->getValue() == 1) &&
15583 (RHS->getValue() == 0 || RHS->getValue() == 1))
15584 // Do not diagnose common idioms.
15585 return;
15586 if (LHS->getValue() != 0 && RHS->getValue() != 0)
15587 S.Diag(ExprLoc, diag::warn_integer_constants_in_conditional_always_true);
15588 }
15589}
15590
15591static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
15592 SourceLocation CC,
15593 bool *ICContext = nullptr,
15594 bool IsListInit = false) {
15595 if (E->isTypeDependent() || E->isValueDependent()) return;
15596
15597 const Type *Source = S.Context.getCanonicalType(T: E->getType()).getTypePtr();
15598 const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
15599 if (Source == Target) return;
15600 if (Target->isDependentType()) return;
15601
15602 // If the conversion context location is invalid, don't complain. We also
15603 // don't want to emit a warning if the issue occurs from the expansion of
15604 // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
15605 // delay this check as long as possible. Once we detect we are in that
15606 // scenario, we just return.
15607 if (CC.isInvalid())
15608 return;
15609
15610 if (Source->isAtomicType())
15611 S.Diag(E->getExprLoc(), diag::warn_atomic_implicit_seq_cst);
15612
15613 // Diagnose implicit casts to bool.
15614 if (Target->isSpecificBuiltinType(K: BuiltinType::Bool)) {
15615 if (isa<StringLiteral>(E))
15616 // Warn on string literal to bool. Checks for string literals in logical
15617 // and expressions, for instance, assert(0 && "error here"), are
15618 // prevented by a check in AnalyzeImplicitConversions().
15619 return DiagnoseImpCast(S, E, T, CC,
15620 diag::warn_impcast_string_literal_to_bool);
15621 if (isa<ObjCStringLiteral>(Val: E) || isa<ObjCArrayLiteral>(Val: E) ||
15622 isa<ObjCDictionaryLiteral>(Val: E) || isa<ObjCBoxedExpr>(Val: E)) {
15623 // This covers the literal expressions that evaluate to Objective-C
15624 // objects.
15625 return DiagnoseImpCast(S, E, T, CC,
15626 diag::warn_impcast_objective_c_literal_to_bool);
15627 }
15628 if (Source->isPointerType() || Source->canDecayToPointerType()) {
15629 // Warn on pointer to bool conversion that is always true.
15630 S.DiagnoseAlwaysNonNullPointer(E, NullType: Expr::NPCK_NotNull, /*IsEqual*/ false,
15631 Range: SourceRange(CC));
15632 }
15633 }
15634
15635 // If we're converting a constant to an ObjC BOOL on a platform where BOOL
15636 // is a typedef for signed char (macOS), then that constant value has to be 1
15637 // or 0.
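  // e.g. (illustrative): 'BOOL b = 2;' where BOOL is signed char is diagnosed,
  // and the fix-it suggests writing '2 ? YES : NO' instead.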
15638 if (isObjCSignedCharBool(S, Ty: T) && Source->isIntegralType(Ctx: S.Context)) {
15639 Expr::EvalResult Result;
15640 if (E->EvaluateAsInt(Result, Ctx: S.getASTContext(),
15641 AllowSideEffects: Expr::SE_AllowSideEffects)) {
15642 if (Result.Val.getInt() != 1 && Result.Val.getInt() != 0) {
15643 adornObjCBoolConversionDiagWithTernaryFixit(
15644 S, E,
15645 S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
15646 << toString(Result.Val.getInt(), 10));
15647 }
15648 return;
15649 }
15650 }
15651
15652 // Check implicit casts from Objective-C collection literals to specialized
15653 // collection types, e.g., NSArray<NSString *> *.
15654 if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Val: E))
15655 checkObjCArrayLiteral(S, TargetType: QualType(Target, 0), ArrayLiteral);
15656 else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Val: E))
15657 checkObjCDictionaryLiteral(S, TargetType: QualType(Target, 0), DictionaryLiteral);
15658
15659 // Strip vector types.
15660 if (isa<VectorType>(Val: Source)) {
15661 if (Target->isSveVLSBuiltinType() &&
15662 (S.Context.areCompatibleSveTypes(FirstType: QualType(Target, 0),
15663 SecondType: QualType(Source, 0)) ||
15664 S.Context.areLaxCompatibleSveTypes(FirstType: QualType(Target, 0),
15665 SecondType: QualType(Source, 0))))
15666 return;
15667
15668 if (Target->isRVVVLSBuiltinType() &&
15669 (S.Context.areCompatibleRVVTypes(FirstType: QualType(Target, 0),
15670 SecondType: QualType(Source, 0)) ||
15671 S.Context.areLaxCompatibleRVVTypes(FirstType: QualType(Target, 0),
15672 SecondType: QualType(Source, 0))))
15673 return;
15674
15675 if (!isa<VectorType>(Val: Target)) {
15676 if (S.SourceMgr.isInSystemMacro(loc: CC))
15677 return;
15678 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
15679 }
15680
15681 // If the vector cast is cast between two vectors of the same size, it is
15682 // a bitcast, not a conversion.
15683 if (S.Context.getTypeSize(T: Source) == S.Context.getTypeSize(T: Target))
15684 return;
15685
15686 Source = cast<VectorType>(Val: Source)->getElementType().getTypePtr();
15687 Target = cast<VectorType>(Val: Target)->getElementType().getTypePtr();
15688 }
15689 if (auto VecTy = dyn_cast<VectorType>(Val: Target))
15690 Target = VecTy->getElementType().getTypePtr();
15691
15692 // Strip complex types.
15693 if (isa<ComplexType>(Val: Source)) {
15694 if (!isa<ComplexType>(Val: Target)) {
15695 if (S.SourceMgr.isInSystemMacro(loc: CC) || Target->isBooleanType())
15696 return;
15697
15698 return DiagnoseImpCast(S, E, T, CC,
15699 S.getLangOpts().CPlusPlus
15700 ? diag::err_impcast_complex_scalar
15701 : diag::warn_impcast_complex_scalar);
15702 }
15703
15704 Source = cast<ComplexType>(Val: Source)->getElementType().getTypePtr();
15705 Target = cast<ComplexType>(Val: Target)->getElementType().getTypePtr();
15706 }
15707
15708 const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Val: Source);
15709 const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Val: Target);
15710
15711 // Strip SVE vector types
15712 if (SourceBT && SourceBT->isSveVLSBuiltinType()) {
15713 // Need the original target type for vector type checks
15714 const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
15715 // Handle conversion from scalable to fixed when msve-vector-bits is
15716 // specified
15717 if (S.Context.areCompatibleSveTypes(FirstType: QualType(OriginalTarget, 0),
15718 SecondType: QualType(Source, 0)) ||
15719 S.Context.areLaxCompatibleSveTypes(FirstType: QualType(OriginalTarget, 0),
15720 SecondType: QualType(Source, 0)))
15721 return;
15722
15723 // If the vector cast is cast between two vectors of the same size, it is
15724 // a bitcast, not a conversion.
15725 if (S.Context.getTypeSize(T: Source) == S.Context.getTypeSize(T: Target))
15726 return;
15727
15728 Source = SourceBT->getSveEltType(S.Context).getTypePtr();
15729 }
15730
15731 if (TargetBT && TargetBT->isSveVLSBuiltinType())
15732 Target = TargetBT->getSveEltType(S.Context).getTypePtr();
15733
15734 // If the source is floating point...
15735 if (SourceBT && SourceBT->isFloatingPoint()) {
15736 // ...and the target is floating point...
15737 if (TargetBT && TargetBT->isFloatingPoint()) {
15738 // ...then warn if we're dropping FP rank.
15739
15740 int Order = S.getASTContext().getFloatingTypeSemanticOrder(
15741 LHS: QualType(SourceBT, 0), RHS: QualType(TargetBT, 0));
15742 if (Order > 0) {
15743 // Don't warn about float constants that are precisely
15744 // representable in the target type.
15745 Expr::EvalResult result;
15746 if (E->EvaluateAsRValue(Result&: result, Ctx: S.Context)) {
15747 // Value might be a float, a float vector, or a float complex.
15748 if (IsSameFloatAfterCast(value: result.Val,
15749 Src: S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)),
15750 Tgt: S.Context.getFloatTypeSemantics(T: QualType(SourceBT, 0))))
15751 return;
15752 }
15753
15754 if (S.SourceMgr.isInSystemMacro(loc: CC))
15755 return;
15756
15757 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
15758 }
15759 // ... or possibly if we're increasing rank, too
15760 else if (Order < 0) {
15761 if (S.SourceMgr.isInSystemMacro(loc: CC))
15762 return;
15763
15764 DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion);
15765 }
15766 return;
15767 }
15768
15769 // If the target is integral, always warn.
15770 if (TargetBT && TargetBT->isInteger()) {
15771 if (S.SourceMgr.isInSystemMacro(loc: CC))
15772 return;
15773
15774 DiagnoseFloatingImpCast(S, E, T, CContext: CC);
15775 }
15776
15777 // Detect the case where a call result is converted from floating-point
15778 // to bool, and the final argument to the call is converted from bool, to
15779 // discover this typo:
15780 //
15781 // bool b = fabs(x < 1.0); // should be "bool b = fabs(x) < 1.0;"
15782 //
15783 // FIXME: This is an incredibly special case; is there some more general
15784 // way to detect this class of misplaced-parentheses bug?
15785 if (Target->isBooleanType() && isa<CallExpr>(Val: E)) {
15786 // Check last argument of function call to see if it is an
15787 // implicit cast from a type matching the type the result
15788 // is being cast to.
15789 CallExpr *CEx = cast<CallExpr>(Val: E);
15790 if (unsigned NumArgs = CEx->getNumArgs()) {
15791 Expr *LastA = CEx->getArg(Arg: NumArgs - 1);
15792 Expr *InnerE = LastA->IgnoreParenImpCasts();
15793 if (isa<ImplicitCastExpr>(Val: LastA) &&
15794 InnerE->getType()->isBooleanType()) {
15795 // Warn on this floating-point to bool conversion
15796 DiagnoseImpCast(S, E, T, CC,
15797 diag::warn_impcast_floating_point_to_bool);
15798 }
15799 }
15800 }
15801 return;
15802 }
15803
15804 // Valid casts involving fixed point types should be accounted for here.
15805 if (Source->isFixedPointType()) {
15806 if (Target->isUnsaturatedFixedPointType()) {
15807 Expr::EvalResult Result;
15808 if (E->EvaluateAsFixedPoint(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects,
15809 InConstantContext: S.isConstantEvaluatedContext())) {
15810 llvm::APFixedPoint Value = Result.Val.getFixedPoint();
15811 llvm::APFixedPoint MaxVal = S.Context.getFixedPointMax(Ty: T);
15812 llvm::APFixedPoint MinVal = S.Context.getFixedPointMin(Ty: T);
15813 if (Value > MaxVal || Value < MinVal) {
15814 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15815 S.PDiag(diag::warn_impcast_fixed_point_range)
15816 << Value.toString() << T
15817 << E->getSourceRange()
15818 << clang::SourceRange(CC));
15819 return;
15820 }
15821 }
15822 } else if (Target->isIntegerType()) {
15823 Expr::EvalResult Result;
15824 if (!S.isConstantEvaluatedContext() &&
15825 E->EvaluateAsFixedPoint(Result, Ctx: S.Context,
15826 AllowSideEffects: Expr::SE_AllowSideEffects)) {
15827 llvm::APFixedPoint FXResult = Result.Val.getFixedPoint();
15828
15829 bool Overflowed;
15830 llvm::APSInt IntResult = FXResult.convertToInt(
15831 DstWidth: S.Context.getIntWidth(T),
15832 DstSign: Target->isSignedIntegerOrEnumerationType(), Overflow: &Overflowed);
15833
15834 if (Overflowed) {
15835 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15836 S.PDiag(diag::warn_impcast_fixed_point_range)
15837 << FXResult.toString() << T
15838 << E->getSourceRange()
15839 << clang::SourceRange(CC));
15840 return;
15841 }
15842 }
15843 }
15844 } else if (Target->isUnsaturatedFixedPointType()) {
15845 if (Source->isIntegerType()) {
15846 Expr::EvalResult Result;
15847 if (!S.isConstantEvaluatedContext() &&
15848 E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects)) {
15849 llvm::APSInt Value = Result.Val.getInt();
15850
15851 bool Overflowed;
15852 llvm::APFixedPoint IntResult = llvm::APFixedPoint::getFromIntValue(
15853 Value, DstFXSema: S.Context.getFixedPointSemantics(Ty: T), Overflow: &Overflowed);
15854
15855 if (Overflowed) {
15856 S.DiagRuntimeBehavior(E->getExprLoc(), E,
15857 S.PDiag(diag::warn_impcast_fixed_point_range)
15858 << toString(Value, /*Radix=*/10) << T
15859 << E->getSourceRange()
15860 << clang::SourceRange(CC));
15861 return;
15862 }
15863 }
15864 }
15865 }
15866
15867 // If we are casting an integer type to a floating point type without
15868 // initialization-list syntax, we might lose accuracy if the floating
15869 // point type has a narrower significand than the integer type.
15870 if (SourceBT && TargetBT && SourceBT->isIntegerType() &&
15871 TargetBT->isFloatingType() && !IsListInit) {
15872 // Determine the number of precision bits in the source integer type.
15873 IntRange SourceRange =
15874 GetExprRange(C&: S.Context, E, InConstantContext: S.isConstantEvaluatedContext(),
15875 /*Approximate=*/true);
15876 unsigned int SourcePrecision = SourceRange.Width;
15877
15878 // Determine the number of precision bits in the
15879 // target floating point type.
15880 unsigned int TargetPrecision = llvm::APFloatBase::semanticsPrecision(
15881 S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)));
15882
15883 if (SourcePrecision > 0 && TargetPrecision > 0 &&
15884 SourcePrecision > TargetPrecision) {
15885
15886 if (std::optional<llvm::APSInt> SourceInt =
15887 E->getIntegerConstantExpr(Ctx: S.Context)) {
15888 // If the source integer is a constant, convert it to the target
15889 // floating point type. Issue a warning if the value changes
15890 // during the whole conversion.
15891 llvm::APFloat TargetFloatValue(
15892 S.Context.getFloatTypeSemantics(T: QualType(TargetBT, 0)));
15893 llvm::APFloat::opStatus ConversionStatus =
15894 TargetFloatValue.convertFromAPInt(
15895 Input: *SourceInt, IsSigned: SourceBT->isSignedInteger(),
15896 RM: llvm::APFloat::rmNearestTiesToEven);
15897
15898 if (ConversionStatus != llvm::APFloat::opOK) {
15899 SmallString<32> PrettySourceValue;
15900 SourceInt->toString(Str&: PrettySourceValue, Radix: 10);
15901 SmallString<32> PrettyTargetValue;
15902 TargetFloatValue.toString(Str&: PrettyTargetValue, FormatPrecision: TargetPrecision);
15903
15904 S.DiagRuntimeBehavior(
15905 E->getExprLoc(), E,
15906 S.PDiag(diag::warn_impcast_integer_float_precision_constant)
15907 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15908 << E->getSourceRange() << clang::SourceRange(CC));
15909 }
15910 } else {
15911 // Otherwise, the implicit conversion may lose precision.
15912 DiagnoseImpCast(S, E, T, CC,
15913 diag::warn_impcast_integer_float_precision);
15914 }
15915 }
15916 }
15917
15918 DiagnoseNullConversion(S, E, T, CC);
15919
15920 S.DiscardMisalignedMemberAddress(T: Target, E);
15921
15922 if (Target->isBooleanType())
15923 DiagnoseIntInBoolContext(S, E);
15924
15925 if (!Source->isIntegerType() || !Target->isIntegerType())
15926 return;
15927
15928 // TODO: remove this early return once the false positives for constant->bool
15929 // in templates, macros, etc, are reduced or removed.
15930 if (Target->isSpecificBuiltinType(K: BuiltinType::Bool))
15931 return;
15932
15933 if (isObjCSignedCharBool(S, Ty: T) && !Source->isCharType() &&
15934 !E->isKnownToHaveBooleanValue(/*Semantic=*/false)) {
15935 return adornObjCBoolConversionDiagWithTernaryFixit(
15936 S, E,
15937 S.Diag(CC, diag::warn_impcast_int_to_objc_signed_char_bool)
15938 << E->getType());
15939 }
15940
15941 IntRange SourceTypeRange =
15942 IntRange::forTargetOfCanonicalType(C&: S.Context, T: Source);
15943 IntRange LikelySourceRange = GetExprRange(
15944 C&: S.Context, E, InConstantContext: S.isConstantEvaluatedContext(), /*Approximate=*/true);
15945 IntRange TargetRange = IntRange::forTargetOfCanonicalType(C&: S.Context, T: Target);
15946
15947 if (LikelySourceRange.Width > TargetRange.Width) {
15948 // If the source is a constant, use a default-on diagnostic.
15949 // TODO: this should happen for bitfield stores, too.
15950 Expr::EvalResult Result;
15951 if (E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects,
15952 InConstantContext: S.isConstantEvaluatedContext())) {
15953 llvm::APSInt Value(32);
15954 Value = Result.Val.getInt();
15955
15956 if (S.SourceMgr.isInSystemMacro(loc: CC))
15957 return;
15958
15959 std::string PrettySourceValue = toString(I: Value, Radix: 10);
15960 std::string PrettyTargetValue = PrettyPrintInRange(Value, Range: TargetRange);
15961
15962 S.DiagRuntimeBehavior(
15963 E->getExprLoc(), E,
15964 S.PDiag(diag::warn_impcast_integer_precision_constant)
15965 << PrettySourceValue << PrettyTargetValue << E->getType() << T
15966 << E->getSourceRange() << SourceRange(CC));
15967 return;
15968 }
15969
15970 // People want to build with -Wshorten-64-to-32 and not -Wconversion.
15971 if (S.SourceMgr.isInSystemMacro(loc: CC))
15972 return;
15973
15974 if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
15975 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
15976 /* pruneControlFlow */ true);
15977 return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
15978 }
15979
15980 if (TargetRange.Width > SourceTypeRange.Width) {
15981 if (auto *UO = dyn_cast<UnaryOperator>(Val: E))
15982 if (UO->getOpcode() == UO_Minus)
15983 if (Source->isUnsignedIntegerType()) {
15984 if (Target->isUnsignedIntegerType())
15985 return DiagnoseImpCast(S, E, T, CC,
15986 diag::warn_impcast_high_order_zero_bits);
15987 if (Target->isSignedIntegerType())
15988 return DiagnoseImpCast(S, E, T, CC,
15989 diag::warn_impcast_nonnegative_result);
15990 }
15991 }
15992
15993 if (TargetRange.Width == LikelySourceRange.Width &&
15994 !TargetRange.NonNegative && LikelySourceRange.NonNegative &&
15995 Source->isSignedIntegerType()) {
15996 // For a signed-to-signed conversion, warn if the non-negative source
15997 // value needs exactly the width of the target type, which will cause a
15998 // negative value to be stored.
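    // e.g. (illustrative): 'signed char c = 128;' needs exactly eight
    // non-negative bits, so the stored value becomes -128.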
15999
16000 Expr::EvalResult Result;
16001 if (E->EvaluateAsInt(Result, Ctx: S.Context, AllowSideEffects: Expr::SE_AllowSideEffects) &&
16002 !S.SourceMgr.isInSystemMacro(loc: CC)) {
16003 llvm::APSInt Value = Result.Val.getInt();
16004 if (isSameWidthConstantConversion(S, E, T, CC)) {
16005 std::string PrettySourceValue = toString(I: Value, Radix: 10);
16006 std::string PrettyTargetValue = PrettyPrintInRange(Value, Range: TargetRange);
16007
16008 S.DiagRuntimeBehavior(
16009 E->getExprLoc(), E,
16010 S.PDiag(diag::warn_impcast_integer_precision_constant)
16011 << PrettySourceValue << PrettyTargetValue << E->getType() << T
16012 << E->getSourceRange() << SourceRange(CC));
16013 return;
16014 }
16015 }
16016
16017 // Fall through for non-constants to give a sign conversion warning.
16018 }
16019
16020 if ((!isa<EnumType>(Val: Target) || !isa<EnumType>(Val: Source)) &&
16021 ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
16022 (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
16023 LikelySourceRange.Width == TargetRange.Width))) {
16024 if (S.SourceMgr.isInSystemMacro(loc: CC))
16025 return;
16026
16027 if (SourceBT && SourceBT->isInteger() && TargetBT &&
16028 TargetBT->isInteger() &&
16029 Source->isSignedIntegerType() == Target->isSignedIntegerType()) {
16030 return;
16031 }
16032
16033 unsigned DiagID = diag::warn_impcast_integer_sign;
16034
16035 // Traditionally, gcc has warned about this under -Wsign-compare.
16036 // We also want to warn about it in -Wconversion.
16037 // So if -Wconversion is off, use a completely identical diagnostic
16038 // in the sign-compare group.
16039 // The conditional-checking code will use the ICContext flag to handle this.
16040 if (ICContext) {
16041 DiagID = diag::warn_impcast_integer_sign_conditional;
16042 *ICContext = true;
16043 }
16044
16045 return DiagnoseImpCast(S, E, T, CContext: CC, diag: DiagID);
16046 }
16047
16048 // Diagnose conversions between different enumeration types.
16049 // In C, we pretend that the type of an EnumConstantDecl is its enumeration
16050 // type, to give us better diagnostics.
16051 QualType SourceType = E->getType();
16052 if (!S.getLangOpts().CPlusPlus) {
16053 if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E))
16054 if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(Val: DRE->getDecl())) {
16055 EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
16056 SourceType = S.Context.getTypeDeclType(Enum);
16057 Source = S.Context.getCanonicalType(T: SourceType).getTypePtr();
16058 }
16059 }
16060
16061 if (const EnumType *SourceEnum = Source->getAs<EnumType>())
16062 if (const EnumType *TargetEnum = Target->getAs<EnumType>())
16063 if (SourceEnum->getDecl()->hasNameForLinkage() &&
16064 TargetEnum->getDecl()->hasNameForLinkage() &&
16065 SourceEnum != TargetEnum) {
16066 if (S.SourceMgr.isInSystemMacro(loc: CC))
16067 return;
16068
16069 return DiagnoseImpCast(S, E, SourceType, T, CC,
16070 diag::warn_impcast_different_enum_types);
16071 }
16072}
16073
16074static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
16075 SourceLocation CC, QualType T);
16076
16077static void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
16078 SourceLocation CC, bool &ICContext) {
16079 E = E->IgnoreParenImpCasts();
16080 // Diagnose incomplete type for second or third operand in C.
16081 if (!S.getLangOpts().CPlusPlus && E->getType()->isRecordType())
16082 S.RequireCompleteExprType(E, diag::err_incomplete_type);
16083
16084 if (auto *CO = dyn_cast<AbstractConditionalOperator>(Val: E))
16085 return CheckConditionalOperator(S, E: CO, CC, T);
16086
16087 AnalyzeImplicitConversions(S, E, CC);
16088 if (E->getType() != T)
16089 return CheckImplicitConversion(S, E, T, CC, ICContext: &ICContext);
16090}
16091
16092static void CheckConditionalOperator(Sema &S, AbstractConditionalOperator *E,
16093 SourceLocation CC, QualType T) {
16094 AnalyzeImplicitConversions(S, E: E->getCond(), CC: E->getQuestionLoc());
16095
16096 Expr *TrueExpr = E->getTrueExpr();
16097 if (auto *BCO = dyn_cast<BinaryConditionalOperator>(Val: E))
16098 TrueExpr = BCO->getCommon();
16099
16100 bool Suspicious = false;
16101 CheckConditionalOperand(S, E: TrueExpr, T, CC, ICContext&: Suspicious);
16102 CheckConditionalOperand(S, E: E->getFalseExpr(), T, CC, ICContext&: Suspicious);
16103
16104 if (T->isBooleanType())
16105 DiagnoseIntInBoolContext(S, E);
16106
16107 // If -Wconversion would have warned about either of the candidates
16108 // for a signedness conversion to the context type...
16109 if (!Suspicious) return;
16110
16111 // ...but it's currently ignored...
16112 if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC))
16113 return;
16114
16115 // ...then check whether it would have warned about either of the
16116 // candidates for a signedness conversion to the condition type.
16117 if (E->getType() == T) return;
16118
16119 Suspicious = false;
16120 CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(),
16121 E->getType(), CC, &Suspicious);
16122 if (!Suspicious)
16123 CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
16124 E->getType(), CC, &Suspicious);
16125}
16126
16127/// Check conversion of given expression to boolean.
16128/// Input argument E is a logical expression.
16129static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) {
16130 // Run the bool-like conversion checks only for C, where bool is still not
16131 // used as the return type of "boolean" operators or as the operand type of
16132 // conditional operators.
16133 if (S.getLangOpts().CPlusPlus)
16134 return;
16135 if (E->IgnoreParenImpCasts()->getType()->isAtomicType())
16136 return;
16137 CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC);
16138}
16139
16140namespace {
16141struct AnalyzeImplicitConversionsWorkItem {
16142 Expr *E;
16143 SourceLocation CC;
16144 bool IsListInit;
16145};
16146}
16147
16148/// Data recursive variant of AnalyzeImplicitConversions. Subexpressions
16149/// that should be visited are added to WorkList.
16150static void AnalyzeImplicitConversions(
16151 Sema &S, AnalyzeImplicitConversionsWorkItem Item,
16152 llvm::SmallVectorImpl<AnalyzeImplicitConversionsWorkItem> &WorkList) {
16153 Expr *OrigE = Item.E;
16154 SourceLocation CC = Item.CC;
16155
16156 QualType T = OrigE->getType();
16157 Expr *E = OrigE->IgnoreParenImpCasts();
16158
16159 // Propagate whether we are in a C++ list initialization expression.
16160 // If so, we do not issue warnings for implicit int-float conversion
16161 // precision loss, because C++11 narrowing already handles it.
16162 bool IsListInit = Item.IsListInit ||
16163 (isa<InitListExpr>(Val: OrigE) && S.getLangOpts().CPlusPlus);
16164
16165 if (E->isTypeDependent() || E->isValueDependent())
16166 return;
16167
16168 Expr *SourceExpr = E;
16169 // Examine, but don't traverse into the source expression of an
16170 // OpaqueValueExpr, since it may have multiple parents and we don't want to
16171 // emit duplicate diagnostics. It's fine to examine the form or attempt to
16172 // evaluate it in the context of checking the specific conversion to T though.
16173 if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: E))
16174 if (auto *Src = OVE->getSourceExpr())
16175 SourceExpr = Src;
16176
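  // e.g. (illustrative): '~b' where 'b' is known to be boolean is almost
  // always meant to be '!b', and 'f() & g()' where both calls return bool is
  // likely meant to be 'f() && g()'; both cases get fix-its below.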
16177 if (const auto *UO = dyn_cast<UnaryOperator>(SourceExpr))
16178 if (UO->getOpcode() == UO_Not &&
16179 UO->getSubExpr()->isKnownToHaveBooleanValue())
16180 S.Diag(UO->getBeginLoc(), diag::warn_bitwise_negation_bool)
16181 << OrigE->getSourceRange() << T->isBooleanType()
16182 << FixItHint::CreateReplacement(UO->getBeginLoc(), "!");
16183
16184 if (const auto *BO = dyn_cast<BinaryOperator>(Val: SourceExpr))
16185 if ((BO->getOpcode() == BO_And || BO->getOpcode() == BO_Or) &&
16186 BO->getLHS()->isKnownToHaveBooleanValue() &&
16187 BO->getRHS()->isKnownToHaveBooleanValue() &&
16188 BO->getLHS()->HasSideEffects(Ctx: S.Context) &&
16189 BO->getRHS()->HasSideEffects(Ctx: S.Context)) {
16190 S.Diag(BO->getBeginLoc(), diag::warn_bitwise_instead_of_logical)
16191 << (BO->getOpcode() == BO_And ? "&" : "|") << OrigE->getSourceRange()
16192 << FixItHint::CreateReplacement(
16193 BO->getOperatorLoc(),
16194 (BO->getOpcode() == BO_And ? "&&" : "||"));
16195 S.Diag(BO->getBeginLoc(), diag::note_cast_operand_to_int);
16196 }
16197
16198 // For conditional operators, we analyze the arguments as if they
16199 // were being fed directly into the output.
16200 if (auto *CO = dyn_cast<AbstractConditionalOperator>(Val: SourceExpr)) {
16201 CheckConditionalOperator(S, E: CO, CC, T);
16202 return;
16203 }
16204
16205 // Check implicit argument conversions for function calls.
16206 if (CallExpr *Call = dyn_cast<CallExpr>(Val: SourceExpr))
16207 CheckImplicitArgumentConversions(S, TheCall: Call, CC);
16208
16209 // Go ahead and check any implicit conversions we might have skipped.
16210 // The non-canonical typecheck is just an optimization;
16211 // CheckImplicitConversion will filter out dead implicit conversions.
16212 if (SourceExpr->getType() != T)
16213 CheckImplicitConversion(S, E: SourceExpr, T, CC, ICContext: nullptr, IsListInit);
16214
16215 // Now continue drilling into this expression.
16216
16217 if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Val: E)) {
16218 // The bound subexpressions in a PseudoObjectExpr are not reachable
16219 // as transitive children.
16220 // FIXME: Use a more uniform representation for this.
16221 for (auto *SE : POE->semantics())
16222 if (auto *OVE = dyn_cast<OpaqueValueExpr>(Val: SE))
16223 WorkList.push_back(Elt: {.E: OVE->getSourceExpr(), .CC: CC, .IsListInit: IsListInit});
16224 }
16225
16226 // Skip past explicit casts.
16227 if (auto *CE = dyn_cast<ExplicitCastExpr>(Val: E)) {
16228 E = CE->getSubExpr()->IgnoreParenImpCasts();
16229 if (!CE->getType()->isVoidType() && E->getType()->isAtomicType())
16230 S.Diag(E->getBeginLoc(), diag::warn_atomic_implicit_seq_cst);
16231 WorkList.push_back(Elt: {.E: E, .CC: CC, .IsListInit: IsListInit});
16232 return;
16233 }
16234
16235 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
16236 // Do a somewhat different check with comparison operators.
16237 if (BO->isComparisonOp())
16238 return AnalyzeComparison(S, E: BO);
16239
16240 // And with simple assignments.
16241 if (BO->getOpcode() == BO_Assign)
16242 return AnalyzeAssignment(S, E: BO);
16243 // And with compound assignments.
16244 if (BO->isAssignmentOp())
16245 return AnalyzeCompoundAssignment(S, E: BO);
16246 }
16247
16248 // These break the otherwise-useful invariant below. Fortunately,
16249 // we don't really need to recurse into them, because any internal
16250 // expressions should have been analyzed already when they were
16251 // built into statements.
16252 if (isa<StmtExpr>(Val: E)) return;
16253
16254 // Don't descend into unevaluated contexts.
16255 if (isa<UnaryExprOrTypeTraitExpr>(Val: E)) return;
16256
16257 // Now just recurse over the expression's children.
16258 CC = E->getExprLoc();
16259 BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E);
16260 bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
16261 for (Stmt *SubStmt : E->children()) {
16262 Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
16263 if (!ChildExpr)
16264 continue;
16265
16266 if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
16267 if (ChildExpr == CSE->getOperand())
16268 // Do not recurse over a CoroutineSuspendExpr's operand.
16269 // The operand is also a subexpression of getCommonExpr(), and
16270 // recursing into it directly would produce duplicate diagnostics.
16271 continue;
16272
16273 if (IsLogicalAndOperator &&
16274 isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
16275      // Ignore string literals used as operands of a logical AND operator;
16276      // this is a common pattern in asserts.
16277 continue;
16278 WorkList.push_back({ChildExpr, CC, IsListInit});
16279 }
16280
16281 if (BO && BO->isLogicalOp()) {
16282 Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
16283 if (!IsLogicalAndOperator || !isa<StringLiteral>(Val: SubExpr))
16284 ::CheckBoolLikeConversion(S, E: SubExpr, CC: BO->getExprLoc());
16285
16286 SubExpr = BO->getRHS()->IgnoreParenImpCasts();
16287 if (!IsLogicalAndOperator || !isa<StringLiteral>(Val: SubExpr))
16288 ::CheckBoolLikeConversion(S, E: SubExpr, CC: BO->getExprLoc());
16289 }
16290
16291 if (const UnaryOperator *U = dyn_cast<UnaryOperator>(Val: E)) {
16292 if (U->getOpcode() == UO_LNot) {
16293 ::CheckBoolLikeConversion(S, E: U->getSubExpr(), CC);
16294 } else if (U->getOpcode() != UO_AddrOf) {
16295 if (U->getSubExpr()->getType()->isAtomicType())
16296 S.Diag(U->getSubExpr()->getBeginLoc(),
16297 diag::warn_atomic_implicit_seq_cst);
16298 }
16299 }
16300}
16301
16302/// AnalyzeImplicitConversions - Find and report any interesting
16303/// implicit conversions in the given expression. There are a couple
16304/// of competing diagnostics here, -Wconversion and -Wsign-compare.
16305static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC,
16306 bool IsListInit/*= false*/) {
16307 llvm::SmallVector<AnalyzeImplicitConversionsWorkItem, 16> WorkList;
16308 WorkList.push_back(Elt: {.E: OrigE, .CC: CC, .IsListInit: IsListInit});
16309 while (!WorkList.empty())
16310 AnalyzeImplicitConversions(S, Item: WorkList.pop_back_val(), WorkList);
16311}
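// Illustrative examples (hypothetical user code, not from this file) of
// conversions the analysis above can report:
//
//   void f(long l) {
//     int i = l;     // may lose integer precision (-Wconversion)
//     char c = 300;  // changes the value of the constant (-Wconstant-conversion)
//   }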
16312
16313 /// Diagnose a non-integer type; otherwise check the implicit conversion to IntT.
16314static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
16315 // Taking into account implicit conversions,
16316 // allow any integer.
16317 if (!E->getType()->isIntegerType()) {
16318 S.Diag(E->getBeginLoc(),
16319 diag::err_opencl_enqueue_kernel_invalid_local_size_type);
16320 return true;
16321 }
16322 // Potentially emit standard warnings for implicit conversions if enabled
16323 // using -Wconversion.
16324 CheckImplicitConversion(S, E, IntT, E->getBeginLoc());
16325 return false;
16326}
16327
16328// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
16329 // Returns true if a warning about taking the address of a reference was emitted.
16330static bool CheckForReference(Sema &SemaRef, const Expr *E,
16331 const PartialDiagnostic &PD) {
16332 E = E->IgnoreParenImpCasts();
16333
16334 const FunctionDecl *FD = nullptr;
16335
16336 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
16337 if (!DRE->getDecl()->getType()->isReferenceType())
16338 return false;
16339 } else if (const MemberExpr *M = dyn_cast<MemberExpr>(Val: E)) {
16340 if (!M->getMemberDecl()->getType()->isReferenceType())
16341 return false;
16342 } else if (const CallExpr *Call = dyn_cast<CallExpr>(Val: E)) {
16343 if (!Call->getCallReturnType(Ctx: SemaRef.Context)->isReferenceType())
16344 return false;
16345 FD = Call->getDirectCallee();
16346 } else {
16347 return false;
16348 }
16349
16350 SemaRef.Diag(Loc: E->getExprLoc(), PD);
16351
16352 // If possible, point to location of function.
16353 if (FD) {
16354 SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
16355 }
16356
16357 return true;
16358}
16359
16360// Returns true if the SourceLocation is expanded from any macro body.
16361 // Returns false if the SourceLocation is invalid, is not in a macro
16362 // expansion, or is expanded from a top-level macro argument.
16363static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) {
16364 if (Loc.isInvalid())
16365 return false;
16366
16367 while (Loc.isMacroID()) {
16368 if (SM.isMacroBodyExpansion(Loc))
16369 return true;
16370 Loc = SM.getImmediateMacroCallerLoc(Loc);
16371 }
16372
16373 return false;
16374}
16375
16376/// Diagnose pointers that are always non-null.
16377/// \param E the expression containing the pointer
16378/// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is
16379/// compared to a null pointer
16380/// \param IsEqual True when the comparison is equal to a null pointer
16381/// \param Range Extra SourceRange to highlight in the diagnostic
16382void Sema::DiagnoseAlwaysNonNullPointer(Expr *E,
16383 Expr::NullPointerConstantKind NullKind,
16384 bool IsEqual, SourceRange Range) {
16385 if (!E)
16386 return;
16387
16388 // Don't warn inside macros.
16389 if (E->getExprLoc().isMacroID()) {
16390 const SourceManager &SM = getSourceManager();
16391 if (IsInAnyMacroBody(SM, Loc: E->getExprLoc()) ||
16392 IsInAnyMacroBody(SM, Loc: Range.getBegin()))
16393 return;
16394 }
16395 E = E->IgnoreImpCasts();
16396
16397 const bool IsCompare = NullKind != Expr::NPCK_NotNull;
16398
16399 if (isa<CXXThisExpr>(Val: E)) {
16400 unsigned DiagID = IsCompare ? diag::warn_this_null_compare
16401 : diag::warn_this_bool_conversion;
16402 Diag(Loc: E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual;
16403 return;
16404 }
16405
16406 bool IsAddressOf = false;
16407
16408 if (auto *UO = dyn_cast<UnaryOperator>(Val: E->IgnoreParens())) {
16409 if (UO->getOpcode() != UO_AddrOf)
16410 return;
16411 IsAddressOf = true;
16412 E = UO->getSubExpr();
16413 }
16414
16415 if (IsAddressOf) {
16416 unsigned DiagID = IsCompare
16417 ? diag::warn_address_of_reference_null_compare
16418 : diag::warn_address_of_reference_bool_conversion;
16419 PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range
16420 << IsEqual;
16421 if (CheckForReference(SemaRef&: *this, E, PD)) {
16422 return;
16423 }
16424 }
16425
16426 auto ComplainAboutNonnullParamOrCall = [&](const Attr *NonnullAttr) {
16427 bool IsParam = isa<NonNullAttr>(NonnullAttr);
16428 std::string Str;
16429 llvm::raw_string_ostream S(Str);
16430 E->printPretty(S, nullptr, getPrintingPolicy());
16431 unsigned DiagID = IsCompare ? diag::warn_nonnull_expr_compare
16432 : diag::warn_cast_nonnull_to_bool;
16433 Diag(Loc: E->getExprLoc(), DiagID) << IsParam << S.str()
16434 << E->getSourceRange() << Range << IsEqual;
16435 Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam;
16436 };
16437
16438 // If we have a CallExpr that is tagged with returns_nonnull, we can complain.
16439 if (auto *Call = dyn_cast<CallExpr>(Val: E->IgnoreParenImpCasts())) {
16440 if (auto *Callee = Call->getDirectCallee()) {
16441 if (const Attr *A = Callee->getAttr<ReturnsNonNullAttr>()) {
16442 ComplainAboutNonnullParamOrCall(A);
16443 return;
16444 }
16445 }
16446 }
16447
16448 // Expect to find a single Decl. Skip anything more complicated.
16449 ValueDecl *D = nullptr;
16450 if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(Val: E)) {
16451 D = R->getDecl();
16452 } else if (MemberExpr *M = dyn_cast<MemberExpr>(Val: E)) {
16453 D = M->getMemberDecl();
16454 }
16455
16456 // Weak Decls can be null.
16457 if (!D || D->isWeak())
16458 return;
16459
16460 // Check for parameter decl with nonnull attribute
16461 if (const auto* PV = dyn_cast<ParmVarDecl>(Val: D)) {
16462 if (getCurFunction() &&
16463 !getCurFunction()->ModifiedNonNullParams.count(Ptr: PV)) {
16464 if (const Attr *A = PV->getAttr<NonNullAttr>()) {
16465 ComplainAboutNonnullParamOrCall(A);
16466 return;
16467 }
16468
16469 if (const auto *FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) {
16470         // Skip function templates that have not been specialized yet.
16471 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
16472 return;
16473 auto ParamIter = llvm::find(FD->parameters(), PV);
16474 assert(ParamIter != FD->param_end());
16475 unsigned ParamNo = std::distance(FD->param_begin(), ParamIter);
16476
16477 for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) {
16478 if (!NonNull->args_size()) {
16479 ComplainAboutNonnullParamOrCall(NonNull);
16480 return;
16481 }
16482
16483 for (const ParamIdx &ArgNo : NonNull->args()) {
16484 if (ArgNo.getASTIndex() == ParamNo) {
16485 ComplainAboutNonnullParamOrCall(NonNull);
16486 return;
16487 }
16488 }
16489 }
16490 }
16491 }
16492 }
16493
16494 QualType T = D->getType();
16495 const bool IsArray = T->isArrayType();
16496 const bool IsFunction = T->isFunctionType();
16497
16498   // Taking the address of a function silences the function warning.
16499 if (IsAddressOf && IsFunction) {
16500 return;
16501 }
16502
16503 // Found nothing.
16504 if (!IsAddressOf && !IsFunction && !IsArray)
16505 return;
16506
16507 // Pretty print the expression for the diagnostic.
16508 std::string Str;
16509 llvm::raw_string_ostream S(Str);
16510 E->printPretty(S, nullptr, getPrintingPolicy());
16511
16512 unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare
16513 : diag::warn_impcast_pointer_to_bool;
16514 enum {
16515 AddressOf,
16516 FunctionPointer,
16517 ArrayPointer
16518 } DiagType;
16519 if (IsAddressOf)
16520 DiagType = AddressOf;
16521 else if (IsFunction)
16522 DiagType = FunctionPointer;
16523 else if (IsArray)
16524 DiagType = ArrayPointer;
16525 else
16526 llvm_unreachable("Could not determine diagnostic.");
16527 Diag(Loc: E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
16528 << Range << IsEqual;
16529
16530 if (!IsFunction)
16531 return;
16532
16533 // Suggest '&' to silence the function warning.
16534 Diag(E->getExprLoc(), diag::note_function_warning_silence)
16535 << FixItHint::CreateInsertion(E->getBeginLoc(), "&");
16536
16537 // Check to see if '()' fixit should be emitted.
16538 QualType ReturnType;
16539 UnresolvedSet<4> NonTemplateOverloads;
16540 tryExprAsCall(E&: *E, ZeroArgCallReturnTy&: ReturnType, NonTemplateOverloads);
16541 if (ReturnType.isNull())
16542 return;
16543
16544 if (IsCompare) {
16545     // There are two cases here. If there is a null constant, only suggest the
16546     // fixit for a pointer return type. If the null is 0, suggest it if the
16547     // return type is a pointer or an integer type.
16548 if (!ReturnType->isPointerType()) {
16549 if (NullKind == Expr::NPCK_ZeroExpression ||
16550 NullKind == Expr::NPCK_ZeroLiteral) {
16551 if (!ReturnType->isIntegerType())
16552 return;
16553 } else {
16554 return;
16555 }
16556 }
16557 } else { // !IsCompare
16558 // For function to bool, only suggest if the function pointer has bool
16559 // return type.
16560 if (!ReturnType->isSpecificBuiltinType(K: BuiltinType::Bool))
16561 return;
16562 }
16563 Diag(E->getExprLoc(), diag::note_function_to_function_call)
16564 << FixItHint::CreateInsertion(getLocForEndOfToken(E->getEndLoc()), "()");
16565}
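// Illustrative examples (hypothetical user code) of expressions the check
// above is intended to flag:
//
//   int *get() __attribute__((returns_nonnull));
//   void f(int &r) {
//     if (&r) {}     // the address of a reference is never null
//     if (get()) {}  // a call tagged returns_nonnull is never null
//   }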
16566
16567/// Diagnoses "dangerous" implicit conversions within the given
16568/// expression (which is a full expression). Implements -Wconversion
16569/// and -Wsign-compare.
16570///
16571/// \param CC the "context" location of the implicit conversion, i.e.
16572 ///   the location of the syntactic entity requiring the implicit
16573/// conversion
16574void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
16575 // Don't diagnose in unevaluated contexts.
16576 if (isUnevaluatedContext())
16577 return;
16578
16579 // Don't diagnose for value- or type-dependent expressions.
16580 if (E->isTypeDependent() || E->isValueDependent())
16581 return;
16582
16583 // Check for array bounds violations in cases where the check isn't triggered
16584 // elsewhere for other Expr types (like BinaryOperators), e.g. when an
16585 // ArraySubscriptExpr is on the RHS of a variable initialization.
16586 CheckArrayAccess(E);
16587
16588 // This is not the right CC for (e.g.) a variable initialization.
16589 AnalyzeImplicitConversions(S&: *this, OrigE: E, CC);
16590}
16591
16592/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
16593/// Input argument E is a logical expression.
16594void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
16595 ::CheckBoolLikeConversion(S&: *this, E, CC);
16596}
16597
16598 /// Diagnose when the expression is an integer constant expression and its
16599 /// evaluation results in integer overflow.
16600 void Sema::CheckForIntOverflow(const Expr *E) {
16601 // Use a work list to deal with nested struct initializers.
16602 SmallVector<const Expr *, 2> Exprs(1, E);
16603
16604 do {
16605 const Expr *OriginalE = Exprs.pop_back_val();
16606 const Expr *E = OriginalE->IgnoreParenCasts();
16607
16608 if (isa<BinaryOperator, UnaryOperator>(Val: E)) {
16609 E->EvaluateForOverflow(Ctx: Context);
16610 continue;
16611 }
16612
16613 if (const auto *InitList = dyn_cast<InitListExpr>(Val: OriginalE))
16614 Exprs.append(in_start: InitList->inits().begin(), in_end: InitList->inits().end());
16615 else if (isa<ObjCBoxedExpr>(Val: OriginalE))
16616 E->EvaluateForOverflow(Ctx: Context);
16617 else if (const auto *Call = dyn_cast<CallExpr>(Val: E))
16618 Exprs.append(in_start: Call->arg_begin(), in_end: Call->arg_end());
16619 else if (const auto *Message = dyn_cast<ObjCMessageExpr>(Val: E))
16620 Exprs.append(Message->arg_begin(), Message->arg_end());
16621 else if (const auto *Construct = dyn_cast<CXXConstructExpr>(Val: E))
16622 Exprs.append(Construct->arg_begin(), Construct->arg_end());
16623 else if (const auto *Temporary = dyn_cast<CXXBindTemporaryExpr>(Val: E))
16624 Exprs.push_back(Elt: Temporary->getSubExpr());
16625 else if (const auto *Array = dyn_cast<ArraySubscriptExpr>(Val: E))
16626 Exprs.push_back(Elt: Array->getIdx());
16627 else if (const auto *Compound = dyn_cast<CompoundLiteralExpr>(Val: E))
16628 Exprs.push_back(Elt: Compound->getInitializer());
16629 else if (const auto *New = dyn_cast<CXXNewExpr>(Val: E);
16630 New && New->isArray()) {
16631 if (auto ArraySize = New->getArraySize())
16632 Exprs.push_back(Elt: *ArraySize);
16633 }
16634 } while (!Exprs.empty());
16635}
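// Illustrative example (hypothetical user code) of an initializer the
// overflow check above reports via EvaluateForOverflow:
//
//   int k = 0x7fffffff + 1;  // overflow in expression; wraps to INT_MIN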
16636
16637namespace {
16638
16639/// Visitor for expressions which looks for unsequenced operations on the
16640/// same object.
16641class SequenceChecker : public ConstEvaluatedExprVisitor<SequenceChecker> {
16642 using Base = ConstEvaluatedExprVisitor<SequenceChecker>;
16643
16644 /// A tree of sequenced regions within an expression. Two regions are
16645  /// unsequenced if one is an ancestor or a descendant of the other. When we
16646 /// finish processing an expression with sequencing, such as a comma
16647 /// expression, we fold its tree nodes into its parent, since they are
16648 /// unsequenced with respect to nodes we will visit later.
16649 class SequenceTree {
16650 struct Value {
16651 explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {}
16652 unsigned Parent : 31;
16653 LLVM_PREFERRED_TYPE(bool)
16654 unsigned Merged : 1;
16655 };
16656 SmallVector<Value, 8> Values;
16657
16658 public:
16659 /// A region within an expression which may be sequenced with respect
16660 /// to some other region.
16661 class Seq {
16662 friend class SequenceTree;
16663
16664 unsigned Index;
16665
16666 explicit Seq(unsigned N) : Index(N) {}
16667
16668 public:
16669 Seq() : Index(0) {}
16670 };
16671
16672 SequenceTree() { Values.push_back(Elt: Value(0)); }
16673 Seq root() const { return Seq(0); }
16674
16675 /// Create a new sequence of operations, which is an unsequenced
16676 /// subset of \p Parent. This sequence of operations is sequenced with
16677 /// respect to other children of \p Parent.
16678 Seq allocate(Seq Parent) {
16679 Values.push_back(Elt: Value(Parent.Index));
16680 return Seq(Values.size() - 1);
16681 }
16682
16683 /// Merge a sequence of operations into its parent.
16684 void merge(Seq S) {
16685 Values[S.Index].Merged = true;
16686 }
16687
16688 /// Determine whether two operations are unsequenced. This operation
16689 /// is asymmetric: \p Cur should be the more recent sequence, and \p Old
16690 /// should have been merged into its parent as appropriate.
16691 bool isUnsequenced(Seq Cur, Seq Old) {
16692 unsigned C = representative(K: Cur.Index);
16693 unsigned Target = representative(K: Old.Index);
16694 while (C >= Target) {
16695 if (C == Target)
16696 return true;
16697 C = Values[C].Parent;
16698 }
16699 return false;
16700 }
16701
16702 private:
16703 /// Pick a representative for a sequence.
16704 unsigned representative(unsigned K) {
16705 if (Values[K].Merged)
16706 // Perform path compression as we go.
16707 return Values[K].Parent = representative(K: Values[K].Parent);
16708 return K;
16709 }
16710 };
16711
16712 /// An object for which we can track unsequenced uses.
16713 using Object = const NamedDecl *;
16714
16715 /// Different flavors of object usage which we track. We only track the
16716 /// least-sequenced usage of each kind.
16717 enum UsageKind {
16718 /// A read of an object. Multiple unsequenced reads are OK.
16719 UK_Use,
16720
16721 /// A modification of an object which is sequenced before the value
16722 /// computation of the expression, such as ++n in C++.
16723 UK_ModAsValue,
16724
16725 /// A modification of an object which is not sequenced before the value
16726 /// computation of the expression, such as n++.
16727 UK_ModAsSideEffect,
16728
16729 UK_Count = UK_ModAsSideEffect + 1
16730 };
16731
16732 /// Bundle together a sequencing region and the expression corresponding
16733 /// to a specific usage. One Usage is stored for each usage kind in UsageInfo.
16734 struct Usage {
16735 const Expr *UsageExpr = nullptr;
16736 SequenceTree::Seq Seq;
16737
16738 Usage() = default;
16739 };
16740
16741 struct UsageInfo {
16742 Usage Uses[UK_Count];
16743
16744 /// Have we issued a diagnostic for this object already?
16745 bool Diagnosed = false;
16746
16747 UsageInfo();
16748 };
16749 using UsageInfoMap = llvm::SmallDenseMap<Object, UsageInfo, 16>;
16750
16751 Sema &SemaRef;
16752
16753 /// Sequenced regions within the expression.
16754 SequenceTree Tree;
16755
16756 /// Declaration modifications and references which we have seen.
16757 UsageInfoMap UsageMap;
16758
16759 /// The region we are currently within.
16760 SequenceTree::Seq Region;
16761
16762 /// Filled in with declarations which were modified as a side-effect
16763 /// (that is, post-increment operations).
16764 SmallVectorImpl<std::pair<Object, Usage>> *ModAsSideEffect = nullptr;
16765
16766 /// Expressions to check later. We defer checking these to reduce
16767 /// stack usage.
16768 SmallVectorImpl<const Expr *> &WorkList;
16769
16770 /// RAII object wrapping the visitation of a sequenced subexpression of an
16771 /// expression. At the end of this process, the side-effects of the evaluation
16772 /// become sequenced with respect to the value computation of the result, so
16773 /// we downgrade any UK_ModAsSideEffect within the evaluation to
16774 /// UK_ModAsValue.
16775 struct SequencedSubexpression {
16776 SequencedSubexpression(SequenceChecker &Self)
16777 : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) {
16778 Self.ModAsSideEffect = &ModAsSideEffect;
16779 }
16780
16781 ~SequencedSubexpression() {
16782 for (const std::pair<Object, Usage> &M : llvm::reverse(C&: ModAsSideEffect)) {
16783 // Add a new usage with usage kind UK_ModAsValue, and then restore
16784 // the previous usage with UK_ModAsSideEffect (thus clearing it if
16785 // the previous one was empty).
16786 UsageInfo &UI = Self.UsageMap[M.first];
16787 auto &SideEffectUsage = UI.Uses[UK_ModAsSideEffect];
16788 Self.addUsage(O: M.first, UI, UsageExpr: SideEffectUsage.UsageExpr, UK: UK_ModAsValue);
16789 SideEffectUsage = M.second;
16790 }
16791 Self.ModAsSideEffect = OldModAsSideEffect;
16792 }
16793
16794 SequenceChecker &Self;
16795 SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect;
16796 SmallVectorImpl<std::pair<Object, Usage>> *OldModAsSideEffect;
16797 };
16798
16799 /// RAII object wrapping the visitation of a subexpression which we might
16800 /// choose to evaluate as a constant. If any subexpression is evaluated and
16801 /// found to be non-constant, this allows us to suppress the evaluation of
16802 /// the outer expression.
16803 class EvaluationTracker {
16804 public:
16805 EvaluationTracker(SequenceChecker &Self)
16806 : Self(Self), Prev(Self.EvalTracker) {
16807 Self.EvalTracker = this;
16808 }
16809
16810 ~EvaluationTracker() {
16811 Self.EvalTracker = Prev;
16812 if (Prev)
16813 Prev->EvalOK &= EvalOK;
16814 }
16815
16816 bool evaluate(const Expr *E, bool &Result) {
16817 if (!EvalOK || E->isValueDependent())
16818 return false;
16819 EvalOK = E->EvaluateAsBooleanCondition(
16820 Result, Ctx: Self.SemaRef.Context,
16821 InConstantContext: Self.SemaRef.isConstantEvaluatedContext());
16822 return EvalOK;
16823 }
16824
16825 private:
16826 SequenceChecker &Self;
16827 EvaluationTracker *Prev;
16828 bool EvalOK = true;
16829 } *EvalTracker = nullptr;
16830
16831 /// Find the object which is produced by the specified expression,
16832 /// if any.
16833 Object getObject(const Expr *E, bool Mod) const {
16834 E = E->IgnoreParenCasts();
16835 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Val: E)) {
16836 if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec))
16837 return getObject(E: UO->getSubExpr(), Mod);
16838 } else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: E)) {
16839 if (BO->getOpcode() == BO_Comma)
16840 return getObject(E: BO->getRHS(), Mod);
16841 if (Mod && BO->isAssignmentOp())
16842 return getObject(E: BO->getLHS(), Mod);
16843 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(Val: E)) {
16844 // FIXME: Check for more interesting cases, like "x.n = ++x.n".
16845 if (isa<CXXThisExpr>(Val: ME->getBase()->IgnoreParenCasts()))
16846 return ME->getMemberDecl();
16847 } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Val: E))
16848 // FIXME: If this is a reference, map through to its value.
16849 return DRE->getDecl();
16850 return nullptr;
16851 }
16852
16853 /// Note that an object \p O was modified or used by an expression
16854 /// \p UsageExpr with usage kind \p UK. \p UI is the \p UsageInfo for
16855 /// the object \p O as obtained via the \p UsageMap.
16856 void addUsage(Object O, UsageInfo &UI, const Expr *UsageExpr, UsageKind UK) {
16857 // Get the old usage for the given object and usage kind.
16858 Usage &U = UI.Uses[UK];
16859 if (!U.UsageExpr || !Tree.isUnsequenced(Cur: Region, Old: U.Seq)) {
16860 // If we have a modification as side effect and are in a sequenced
16861 // subexpression, save the old Usage so that we can restore it later
16862 // in SequencedSubexpression::~SequencedSubexpression.
16863 if (UK == UK_ModAsSideEffect && ModAsSideEffect)
16864 ModAsSideEffect->push_back(Elt: std::make_pair(x&: O, y&: U));
16865 // Then record the new usage with the current sequencing region.
16866 U.UsageExpr = UsageExpr;
16867 U.Seq = Region;
16868 }
16869 }
16870
16871 /// Check whether a modification or use of an object \p O in an expression
16872 /// \p UsageExpr conflicts with a prior usage of kind \p OtherKind. \p UI is
16873 /// the \p UsageInfo for the object \p O as obtained via the \p UsageMap.
16874 /// \p IsModMod is true when we are checking for a mod-mod unsequenced
16875   /// usage, and false when we are checking for a mod-use unsequenced usage.
16876 void checkUsage(Object O, UsageInfo &UI, const Expr *UsageExpr,
16877 UsageKind OtherKind, bool IsModMod) {
16878 if (UI.Diagnosed)
16879 return;
16880
16881 const Usage &U = UI.Uses[OtherKind];
16882 if (!U.UsageExpr || !Tree.isUnsequenced(Cur: Region, Old: U.Seq))
16883 return;
16884
16885 const Expr *Mod = U.UsageExpr;
16886 const Expr *ModOrUse = UsageExpr;
16887 if (OtherKind == UK_Use)
16888 std::swap(a&: Mod, b&: ModOrUse);
16889
16890 SemaRef.DiagRuntimeBehavior(
16891 Mod->getExprLoc(), {Mod, ModOrUse},
16892 SemaRef.PDiag(IsModMod ? diag::warn_unsequenced_mod_mod
16893 : diag::warn_unsequenced_mod_use)
16894 << O << SourceRange(ModOrUse->getExprLoc()));
16895 UI.Diagnosed = true;
16896 }
16897
16898 // A note on note{Pre, Post}{Use, Mod}:
16899 //
16900 // (It helps to follow the algorithm with an expression such as
16901 // "((++k)++, k) = k" or "k = (k++, k++)". Both contain unsequenced
16902 // operations before C++17 and both are well-defined in C++17).
16903 //
16904  // When visiting a node which uses or modifies an object, we first call
16905  // notePreUse or notePreMod before visiting its sub-expression(s). At this
16906  // point the children of the current node have not yet been visited, so the
16907  // eventual uses/modifications resulting from the children of the current
16908  // node have not been recorded yet.
16909  //
16910  // We then visit the children of the current node. After that, notePostUse or
16911  // notePostMod is called. These will 1) detect an unsequenced modification
16912  // as side effect (as in "k++ + k") and 2) add a new usage with the
16913  // appropriate usage kind.
16914  //
16915  // We also have to be careful that some operations sequence modifications as
16916  // side effects as well (for example: || or ,). To account for this we wrap
16917  // the visitation of such a sub-expression (for example: the LHS of || or ,)
16918  // with SequencedSubexpression. SequencedSubexpression is an RAII object
16919  // which records usages that are modifications as side effects, and then
16920  // downgrades them (or, more accurately, restores the previous usage, which
16921  // was a modification as side effect) when exiting the scope of the sequenced
16922  // subexpression.
16923
16924 void notePreUse(Object O, const Expr *UseExpr) {
16925 UsageInfo &UI = UsageMap[O];
16926 // Uses conflict with other modifications.
16927 checkUsage(O, UI, UsageExpr: UseExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/false);
16928 }
16929
16930 void notePostUse(Object O, const Expr *UseExpr) {
16931 UsageInfo &UI = UsageMap[O];
16932 checkUsage(O, UI, UsageExpr: UseExpr, /*OtherKind=*/UK_ModAsSideEffect,
16933 /*IsModMod=*/false);
16934 addUsage(O, UI, UsageExpr: UseExpr, /*UsageKind=*/UK: UK_Use);
16935 }
16936
16937 void notePreMod(Object O, const Expr *ModExpr) {
16938 UsageInfo &UI = UsageMap[O];
16939 // Modifications conflict with other modifications and with uses.
16940 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_ModAsValue, /*IsModMod=*/true);
16941 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_Use, /*IsModMod=*/false);
16942 }
16943
16944 void notePostMod(Object O, const Expr *ModExpr, UsageKind UK) {
16945 UsageInfo &UI = UsageMap[O];
16946 checkUsage(O, UI, UsageExpr: ModExpr, /*OtherKind=*/UK_ModAsSideEffect,
16947 /*IsModMod=*/true);
16948 addUsage(O, UI, UsageExpr: ModExpr, /*UsageKind=*/UK);
16949 }
16950
16951public:
16952 SequenceChecker(Sema &S, const Expr *E,
16953 SmallVectorImpl<const Expr *> &WorkList)
16954 : Base(S.Context), SemaRef(S), Region(Tree.root()), WorkList(WorkList) {
16955 Visit(E);
16956 // Silence a -Wunused-private-field since WorkList is now unused.
16957     // TODO: Evaluate whether it can be used, and if not, remove it.
16958 (void)this->WorkList;
16959 }
16960
16961 void VisitStmt(const Stmt *S) {
16962 // Skip all statements which aren't expressions for now.
16963 }
16964
16965 void VisitExpr(const Expr *E) {
16966 // By default, just recurse to evaluated subexpressions.
16967 Base::VisitStmt(E);
16968 }
16969
16970 void VisitCoroutineSuspendExpr(const CoroutineSuspendExpr *CSE) {
16971 for (auto *Sub : CSE->children()) {
16972 const Expr *ChildExpr = dyn_cast_or_null<Expr>(Val: Sub);
16973 if (!ChildExpr)
16974 continue;
16975
16976 if (ChildExpr == CSE->getOperand())
16977 // Do not recurse over a CoroutineSuspendExpr's operand.
16978 // The operand is also a subexpression of getCommonExpr(), and
16979 // recursing into it directly could confuse object management
16980 // for the sake of sequence tracking.
16981 continue;
16982
16983 Visit(S: Sub);
16984 }
16985 }
16986
16987 void VisitCastExpr(const CastExpr *E) {
16988 Object O = Object();
16989 if (E->getCastKind() == CK_LValueToRValue)
16990 O = getObject(E: E->getSubExpr(), Mod: false);
16991
16992 if (O)
16993 notePreUse(O, E);
16994 VisitExpr(E);
16995 if (O)
16996 notePostUse(O, E);
16997 }
16998
16999 void VisitSequencedExpressions(const Expr *SequencedBefore,
17000 const Expr *SequencedAfter) {
17001 SequenceTree::Seq BeforeRegion = Tree.allocate(Parent: Region);
17002 SequenceTree::Seq AfterRegion = Tree.allocate(Parent: Region);
17003 SequenceTree::Seq OldRegion = Region;
17004
17005 {
17006 SequencedSubexpression SeqBefore(*this);
17007 Region = BeforeRegion;
17008 Visit(SequencedBefore);
17009 }
17010
17011 Region = AfterRegion;
17012 Visit(SequencedAfter);
17013
17014 Region = OldRegion;
17015
17016 Tree.merge(S: BeforeRegion);
17017 Tree.merge(S: AfterRegion);
17018 }
17019
17020 void VisitArraySubscriptExpr(const ArraySubscriptExpr *ASE) {
17021 // C++17 [expr.sub]p1:
17022 // The expression E1[E2] is identical (by definition) to *((E1)+(E2)). The
17023 // expression E1 is sequenced before the expression E2.
17024 if (SemaRef.getLangOpts().CPlusPlus17)
17025 VisitSequencedExpressions(SequencedBefore: ASE->getLHS(), SequencedAfter: ASE->getRHS());
17026 else {
17027 Visit(ASE->getLHS());
17028 Visit(ASE->getRHS());
17029 }
17030 }
17031
17032 void VisitBinPtrMemD(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
17033 void VisitBinPtrMemI(const BinaryOperator *BO) { VisitBinPtrMem(BO); }
17034 void VisitBinPtrMem(const BinaryOperator *BO) {
17035 // C++17 [expr.mptr.oper]p4:
17036 // Abbreviating pm-expression.*cast-expression as E1.*E2, [...]
17037 // the expression E1 is sequenced before the expression E2.
17038 if (SemaRef.getLangOpts().CPlusPlus17)
17039 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17040 else {
17041 Visit(BO->getLHS());
17042 Visit(BO->getRHS());
17043 }
17044 }
17045
17046 void VisitBinShl(const BinaryOperator *BO) { VisitBinShlShr(BO); }
17047 void VisitBinShr(const BinaryOperator *BO) { VisitBinShlShr(BO); }
17048 void VisitBinShlShr(const BinaryOperator *BO) {
17049 // C++17 [expr.shift]p4:
17050 // The expression E1 is sequenced before the expression E2.
17051 if (SemaRef.getLangOpts().CPlusPlus17)
17052 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17053 else {
17054 Visit(BO->getLHS());
17055 Visit(BO->getRHS());
17056 }
17057 }
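  // Illustrative example (hypothetical user code) of the shift sequencing
  // handled above:
  //
  //   int i = 0;
  //   int x = i << i++;  // unsequenced before C++17; well-defined in C++17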
17058
17059 void VisitBinComma(const BinaryOperator *BO) {
17060 // C++11 [expr.comma]p1:
17061 // Every value computation and side effect associated with the left
17062 // expression is sequenced before every value computation and side
17063 // effect associated with the right expression.
17064 VisitSequencedExpressions(SequencedBefore: BO->getLHS(), SequencedAfter: BO->getRHS());
17065 }
17066
17067 void VisitBinAssign(const BinaryOperator *BO) {
17068 SequenceTree::Seq RHSRegion;
17069 SequenceTree::Seq LHSRegion;
17070 if (SemaRef.getLangOpts().CPlusPlus17) {
17071 RHSRegion = Tree.allocate(Parent: Region);
17072 LHSRegion = Tree.allocate(Parent: Region);
17073 } else {
17074 RHSRegion = Region;
17075 LHSRegion = Region;
17076 }
17077 SequenceTree::Seq OldRegion = Region;
17078
17079 // C++11 [expr.ass]p1:
17080 // [...] the assignment is sequenced after the value computation
17081 // of the right and left operands, [...]
17082 //
17083 // so check it before inspecting the operands and update the
17084 // map afterwards.
17085 Object O = getObject(E: BO->getLHS(), /*Mod=*/true);
17086 if (O)
17087 notePreMod(O, BO);
17088
17089 if (SemaRef.getLangOpts().CPlusPlus17) {
17090 // C++17 [expr.ass]p1:
17091 // [...] The right operand is sequenced before the left operand. [...]
17092 {
17093 SequencedSubexpression SeqBefore(*this);
17094 Region = RHSRegion;
17095 Visit(BO->getRHS());
17096 }
17097
17098 Region = LHSRegion;
17099 Visit(BO->getLHS());
17100
17101 if (O && isa<CompoundAssignOperator>(Val: BO))
17102 notePostUse(O, BO);
17103
17104 } else {
17105 // C++11 does not specify any sequencing between the LHS and RHS.
17106 Region = LHSRegion;
17107 Visit(BO->getLHS());
17108
17109 if (O && isa<CompoundAssignOperator>(Val: BO))
17110 notePostUse(O, BO);
17111
17112 Region = RHSRegion;
17113 Visit(BO->getRHS());
17114 }
17115
17116 // C++11 [expr.ass]p1:
17117 // the assignment is sequenced [...] before the value computation of the
17118 // assignment expression.
17119 // C11 6.5.16/3 has no such rule.
17120 Region = OldRegion;
17121 if (O)
17122 notePostMod(O, BO,
17123 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
17124 : UK_ModAsSideEffect);
17125 if (SemaRef.getLangOpts().CPlusPlus17) {
17126 Tree.merge(S: RHSRegion);
17127 Tree.merge(S: LHSRegion);
17128 }
17129 }
17130
17131 void VisitCompoundAssignOperator(const CompoundAssignOperator *CAO) {
17132 VisitBinAssign(CAO);
17133 }
17134
17135 void VisitUnaryPreInc(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
17136 void VisitUnaryPreDec(const UnaryOperator *UO) { VisitUnaryPreIncDec(UO); }
17137 void VisitUnaryPreIncDec(const UnaryOperator *UO) {
17138 Object O = getObject(E: UO->getSubExpr(), Mod: true);
17139 if (!O)
17140 return VisitExpr(UO);
17141
17142 notePreMod(O, UO);
17143 Visit(UO->getSubExpr());
17144 // C++11 [expr.pre.incr]p1:
17145 // the expression ++x is equivalent to x+=1
17146 notePostMod(O, UO,
17147 SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue
17148 : UK_ModAsSideEffect);
17149 }
17150
17151 void VisitUnaryPostInc(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
17152 void VisitUnaryPostDec(const UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
17153 void VisitUnaryPostIncDec(const UnaryOperator *UO) {
17154 Object O = getObject(E: UO->getSubExpr(), Mod: true);
17155 if (!O)
17156 return VisitExpr(UO);
17157
17158 notePreMod(O, UO);
17159 Visit(UO->getSubExpr());
17160 notePostMod(O, UO, UK_ModAsSideEffect);
17161 }
17162
17163 void VisitBinLOr(const BinaryOperator *BO) {
17164 // C++11 [expr.log.or]p2:
17165 // If the second expression is evaluated, every value computation and
17166 // side effect associated with the first expression is sequenced before
17167 // every value computation and side effect associated with the
17168 // second expression.
17169 SequenceTree::Seq LHSRegion = Tree.allocate(Parent: Region);
17170 SequenceTree::Seq RHSRegion = Tree.allocate(Parent: Region);
17171 SequenceTree::Seq OldRegion = Region;
17172
17173 EvaluationTracker Eval(*this);
17174 {
17175 SequencedSubexpression Sequenced(*this);
17176 Region = LHSRegion;
17177 Visit(BO->getLHS());
17178 }
17179
17180 // C++11 [expr.log.or]p1:
17181 // [...] the second operand is not evaluated if the first operand
17182 // evaluates to true.
17183 bool EvalResult = false;
17184 bool EvalOK = Eval.evaluate(E: BO->getLHS(), Result&: EvalResult);
17185 bool ShouldVisitRHS = !EvalOK || !EvalResult;
17186 if (ShouldVisitRHS) {
17187 Region = RHSRegion;
17188 Visit(BO->getRHS());
17189 }
17190
17191 Region = OldRegion;
17192 Tree.merge(S: LHSRegion);
17193 Tree.merge(S: RHSRegion);
17194 }
17195
17196 void VisitBinLAnd(const BinaryOperator *BO) {
17197 // C++11 [expr.log.and]p2:
17198 // If the second expression is evaluated, every value computation and
17199 // side effect associated with the first expression is sequenced before
17200 // every value computation and side effect associated with the
17201 // second expression.
17202 SequenceTree::Seq LHSRegion = Tree.allocate(Parent: Region);
17203 SequenceTree::Seq RHSRegion = Tree.allocate(Parent: Region);
17204 SequenceTree::Seq OldRegion = Region;
17205
17206 EvaluationTracker Eval(*this);
17207 {
17208 SequencedSubexpression Sequenced(*this);
17209 Region = LHSRegion;
17210 Visit(BO->getLHS());
17211 }
17212
17213 // C++11 [expr.log.and]p1:
17214 // [...] the second operand is not evaluated if the first operand is false.
17215 bool EvalResult = false;
17216 bool EvalOK = Eval.evaluate(E: BO->getLHS(), Result&: EvalResult);
17217 bool ShouldVisitRHS = !EvalOK || EvalResult;
17218 if (ShouldVisitRHS) {
17219 Region = RHSRegion;
17220 Visit(BO->getRHS());
17221 }
17222
17223 Region = OldRegion;
17224 Tree.merge(S: LHSRegion);
17225 Tree.merge(S: RHSRegion);
17226 }
17227
17228 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO) {
17229 // C++11 [expr.cond]p1:
17230 // [...] Every value computation and side effect associated with the first
17231 // expression is sequenced before every value computation and side effect
17232 // associated with the second or third expression.
17233 SequenceTree::Seq ConditionRegion = Tree.allocate(Parent: Region);
17234
17235    // No sequencing is specified between the true and false expressions.
17236    // However, since exactly one of the two is going to be evaluated, we can
17237    // consider them to be sequenced. This is needed to avoid warning on
17238    // something like "x ? y += 1 : y += 2;" in the case where we will visit
17239    // both the true and false expressions because we can't evaluate x.
17240    // This will still allow us to detect an expression like (pre C++17)
17241    // "(x ? y += 1 : y += 2) = y".
17242    //
17243    // We don't wrap the visitation of the true and false expressions with
17244    // SequencedSubexpression because we don't want to downgrade modifications
17245    // as side effects in the true and false expressions after the visitation
17246    // is done. (For example, in the expression "(x ? y++ : y++) + y" we should
17247    // not warn between the two "y++", but we should warn between the "y++"
17248    // and the "y".)
17249 SequenceTree::Seq TrueRegion = Tree.allocate(Parent: Region);
17250 SequenceTree::Seq FalseRegion = Tree.allocate(Parent: Region);
17251 SequenceTree::Seq OldRegion = Region;
17252
17253 EvaluationTracker Eval(*this);
17254 {
17255 SequencedSubexpression Sequenced(*this);
17256 Region = ConditionRegion;
17257 Visit(CO->getCond());
17258 }
17259
17260 // C++11 [expr.cond]p1:
17261 // [...] The first expression is contextually converted to bool (Clause 4).
17262 // It is evaluated and if it is true, the result of the conditional
17263 // expression is the value of the second expression, otherwise that of the
17264 // third expression. Only one of the second and third expressions is
17265 // evaluated. [...]
17266 bool EvalResult = false;
17267 bool EvalOK = Eval.evaluate(E: CO->getCond(), Result&: EvalResult);
17268 bool ShouldVisitTrueExpr = !EvalOK || EvalResult;
17269 bool ShouldVisitFalseExpr = !EvalOK || !EvalResult;
17270 if (ShouldVisitTrueExpr) {
17271 Region = TrueRegion;
17272 Visit(CO->getTrueExpr());
17273 }
17274 if (ShouldVisitFalseExpr) {
17275 Region = FalseRegion;
17276 Visit(CO->getFalseExpr());
17277 }
17278
17279 Region = OldRegion;
17280 Tree.merge(S: ConditionRegion);
17281 Tree.merge(S: TrueRegion);
17282 Tree.merge(S: FalseRegion);
17283 }
17284
17285 void VisitCallExpr(const CallExpr *CE) {
17286 // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
17287
17288 if (CE->isUnevaluatedBuiltinCall(Ctx: Context))
17289 return;
17290
17291 // C++11 [intro.execution]p15:
17292 // When calling a function [...], every value computation and side effect
17293 // associated with any argument expression, or with the postfix expression
17294 // designating the called function, is sequenced before execution of every
17295 // expression or statement in the body of the function [and thus before
17296 // the value computation of its result].
17297 SequencedSubexpression Sequenced(*this);
17298 SemaRef.runWithSufficientStackSpace(Loc: CE->getExprLoc(), Fn: [&] {
17299 // C++17 [expr.call]p5
17300 // The postfix-expression is sequenced before each expression in the
17301 // expression-list and any default argument. [...]
17302 SequenceTree::Seq CalleeRegion;
17303 SequenceTree::Seq OtherRegion;
17304 if (SemaRef.getLangOpts().CPlusPlus17) {
17305 CalleeRegion = Tree.allocate(Parent: Region);
17306 OtherRegion = Tree.allocate(Parent: Region);
17307 } else {
17308 CalleeRegion = Region;
17309 OtherRegion = Region;
17310 }
17311 SequenceTree::Seq OldRegion = Region;
17312
17313 // Visit the callee expression first.
17314 Region = CalleeRegion;
17315 if (SemaRef.getLangOpts().CPlusPlus17) {
17316 SequencedSubexpression Sequenced(*this);
17317 Visit(CE->getCallee());
17318 } else {
17319 Visit(CE->getCallee());
17320 }
17321
17322 // Then visit the argument expressions.
17323 Region = OtherRegion;
17324 for (const Expr *Argument : CE->arguments())
17325 Visit(Argument);
17326
17327 Region = OldRegion;
17328 if (SemaRef.getLangOpts().CPlusPlus17) {
17329 Tree.merge(S: CalleeRegion);
17330 Tree.merge(S: OtherRegion);
17331 }
17332 });
17333 }
17334
17335 void VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *CXXOCE) {
17336 // C++17 [over.match.oper]p2:
17337 // [...] the operator notation is first transformed to the equivalent
17338 // function-call notation as summarized in Table 12 (where @ denotes one
17339 // of the operators covered in the specified subclause). However, the
17340 // operands are sequenced in the order prescribed for the built-in
17341 // operator (Clause 8).
17342 //
17343 // From the above only overloaded binary operators and overloaded call
17344 // operators have sequencing rules in C++17 that we need to handle
17345 // separately.
17346 if (!SemaRef.getLangOpts().CPlusPlus17 ||
17347 (CXXOCE->getNumArgs() != 2 && CXXOCE->getOperator() != OO_Call))
17348 return VisitCallExpr(CXXOCE);
17349
17350 enum {
17351 NoSequencing,
17352 LHSBeforeRHS,
17353 RHSBeforeLHS,
17354 LHSBeforeRest
17355 } SequencingKind;
17356 switch (CXXOCE->getOperator()) {
17357 case OO_Equal:
17358 case OO_PlusEqual:
17359 case OO_MinusEqual:
17360 case OO_StarEqual:
17361 case OO_SlashEqual:
17362 case OO_PercentEqual:
17363 case OO_CaretEqual:
17364 case OO_AmpEqual:
17365 case OO_PipeEqual:
17366 case OO_LessLessEqual:
17367 case OO_GreaterGreaterEqual:
17368 SequencingKind = RHSBeforeLHS;
17369 break;
17370
17371 case OO_LessLess:
17372 case OO_GreaterGreater:
17373 case OO_AmpAmp:
17374 case OO_PipePipe:
17375 case OO_Comma:
17376 case OO_ArrowStar:
17377 case OO_Subscript:
17378 SequencingKind = LHSBeforeRHS;
17379 break;
17380
17381 case OO_Call:
17382 SequencingKind = LHSBeforeRest;
17383 break;
17384
17385 default:
17386 SequencingKind = NoSequencing;
17387 break;
17388 }
17389
17390 if (SequencingKind == NoSequencing)
17391 return VisitCallExpr(CXXOCE);
17392
17393 // This is a call, so all subexpressions are sequenced before the result.
17394 SequencedSubexpression Sequenced(*this);
17395
17396 SemaRef.runWithSufficientStackSpace(CXXOCE->getExprLoc(), [&] {
17397 assert(SemaRef.getLangOpts().CPlusPlus17 &&
17398              "Should only get here with C++17 and above!");
17399 assert((CXXOCE->getNumArgs() == 2 || CXXOCE->getOperator() == OO_Call) &&
17400              "Should only get here with an overloaded binary operator"
17401 " or an overloaded call operator!");
17402
17403 if (SequencingKind == LHSBeforeRest) {
17404 assert(CXXOCE->getOperator() == OO_Call &&
17405 "We should only have an overloaded call operator here!");
17406
17407 // This is very similar to VisitCallExpr, except that we only have the
17408 // C++17 case. The postfix-expression is the first argument of the
17409 // CXXOperatorCallExpr. The expressions in the expression-list, if any,
17410 // are in the following arguments.
17411 //
17412 // Note that we intentionally do not visit the callee expression since
17413 // it is just a decayed reference to a function.
17414 SequenceTree::Seq PostfixExprRegion = Tree.allocate(Parent: Region);
17415 SequenceTree::Seq ArgsRegion = Tree.allocate(Parent: Region);
17416 SequenceTree::Seq OldRegion = Region;
17417
17418 assert(CXXOCE->getNumArgs() >= 1 &&
17419 "An overloaded call operator must have at least one argument"
17420 " for the postfix-expression!");
17421 const Expr *PostfixExpr = CXXOCE->getArgs()[0];
17422 llvm::ArrayRef<const Expr *> Args(CXXOCE->getArgs() + 1,
17423 CXXOCE->getNumArgs() - 1);
17424
17425 // Visit the postfix-expression first.
17426 {
17427 Region = PostfixExprRegion;
17428 SequencedSubexpression Sequenced(*this);
17429 Visit(PostfixExpr);
17430 }
17431
17432 // Then visit the argument expressions.
17433 Region = ArgsRegion;
17434 for (const Expr *Arg : Args)
17435 Visit(Arg);
17436
17437 Region = OldRegion;
17438 Tree.merge(S: PostfixExprRegion);
17439 Tree.merge(S: ArgsRegion);
17440 } else {
17441 assert(CXXOCE->getNumArgs() == 2 &&
17442 "Should only have two arguments here!");
17443 assert((SequencingKind == LHSBeforeRHS ||
17444 SequencingKind == RHSBeforeLHS) &&
17445 "Unexpected sequencing kind!");
17446
17447 // We do not visit the callee expression since it is just a decayed
17448 // reference to a function.
17449 const Expr *E1 = CXXOCE->getArg(0);
17450 const Expr *E2 = CXXOCE->getArg(1);
17451 if (SequencingKind == RHSBeforeLHS)
17452 std::swap(a&: E1, b&: E2);
17453
17454 return VisitSequencedExpressions(E1, E2);
17455 }
17456 });
17457 }
17458
17459 void VisitCXXConstructExpr(const CXXConstructExpr *CCE) {
17460 // This is a call, so all subexpressions are sequenced before the result.
17461 SequencedSubexpression Sequenced(*this);
17462
17463 if (!CCE->isListInitialization())
17464 return VisitExpr(CCE);
17465
17466 // In C++11, list initializations are sequenced.
17467 SmallVector<SequenceTree::Seq, 32> Elts;
17468 SequenceTree::Seq Parent = Region;
17469 for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
17470 E = CCE->arg_end();
17471 I != E; ++I) {
17472 Region = Tree.allocate(Parent);
17473 Elts.push_back(Region);
17474 Visit(*I);
17475 }
17476
17477 // Forget that the initializers are sequenced.
17478 Region = Parent;
17479 for (unsigned I = 0; I < Elts.size(); ++I)
17480 Tree.merge(S: Elts[I]);
17481 }
17482
17483 void VisitInitListExpr(const InitListExpr *ILE) {
17484 if (!SemaRef.getLangOpts().CPlusPlus11)
17485 return VisitExpr(ILE);
17486
17487 // In C++11, list initializations are sequenced.
17488 SmallVector<SequenceTree::Seq, 32> Elts;
17489 SequenceTree::Seq Parent = Region;
17490 for (unsigned I = 0; I < ILE->getNumInits(); ++I) {
17491 const Expr *E = ILE->getInit(Init: I);
17492 if (!E)
17493 continue;
17494 Region = Tree.allocate(Parent);
17495 Elts.push_back(Elt: Region);
17496 Visit(E);
17497 }
17498
17499 // Forget that the initializers are sequenced.
17500 Region = Parent;
17501 for (unsigned I = 0; I < Elts.size(); ++I)
17502 Tree.merge(S: Elts[I]);
17503 }
17504};
17505
17506SequenceChecker::UsageInfo::UsageInfo() = default;
17507
17508} // namespace
17509
17510void Sema::CheckUnsequencedOperations(const Expr *E) {
17511 SmallVector<const Expr *, 8> WorkList;
17512 WorkList.push_back(Elt: E);
17513 while (!WorkList.empty()) {
17514 const Expr *Item = WorkList.pop_back_val();
17515 SequenceChecker(*this, Item, WorkList);
17516 }
17517}
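// Illustrative examples (hypothetical user code) of expressions the sequence
// checker diagnoses under pre-C++17 sequencing rules:
//
//   void f(int, int);
//   int i = 0;
//   i = i++ + 1;  // multiple unsequenced modifications to 'i'
//   f(i++, i++);  // unsequenced modifications to 'i' in the argument list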
17518
17519void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
17520 bool IsConstexpr) {
17521 llvm::SaveAndRestore ConstantContext(isConstantEvaluatedOverride,
17522 IsConstexpr || isa<ConstantExpr>(Val: E));
17523 CheckImplicitConversions(E, CC: CheckLoc);
17524 if (!E->isInstantiationDependent())
17525 CheckUnsequencedOperations(E);
17526 if (!IsConstexpr && !E->isValueDependent())
17527 CheckForIntOverflow(E);
17528 DiagnoseMisalignedMembers();
17529}
17530
17531void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
17532 FieldDecl *BitField,
17533 Expr *Init) {
17534 (void) AnalyzeBitFieldAssignment(S&: *this, Bitfield: BitField, Init, InitLoc);
17535}
17536
17537static void diagnoseArrayStarInParamType(Sema &S, QualType PType,
17538 SourceLocation Loc) {
17539 if (!PType->isVariablyModifiedType())
17540 return;
17541 if (const auto *PointerTy = dyn_cast<PointerType>(Val&: PType)) {
17542 diagnoseArrayStarInParamType(S, PType: PointerTy->getPointeeType(), Loc);
17543 return;
17544 }
17545 if (const auto *ReferenceTy = dyn_cast<ReferenceType>(Val&: PType)) {
17546 diagnoseArrayStarInParamType(S, PType: ReferenceTy->getPointeeType(), Loc);
17547 return;
17548 }
17549 if (const auto *ParenTy = dyn_cast<ParenType>(Val&: PType)) {
17550 diagnoseArrayStarInParamType(S, PType: ParenTy->getInnerType(), Loc);
17551 return;
17552 }
17553
17554 const ArrayType *AT = S.Context.getAsArrayType(T: PType);
17555 if (!AT)
17556 return;
17557
17558 if (AT->getSizeModifier() != ArraySizeModifier::Star) {
17559 diagnoseArrayStarInParamType(S, PType: AT->getElementType(), Loc);
17560 return;
17561 }
17562
17563 S.Diag(Loc, diag::err_array_star_in_function_definition);
17564}
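// Illustrative example (hypothetical user code): the '[*]' notation is valid
// in a declaration but not in a function definition:
//
//   void f(int a[*]);    // OK: declaration only
//   void f(int a[*]) {}  // error: '[*]' used in a function definition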
17565
17566/// CheckParmsForFunctionDef - Check that the parameters of the given
17567/// function are appropriate for the definition of a function. This
17568/// takes care of any checks that cannot be performed on the
17569/// declaration itself, e.g., that the types of each of the function
17570/// parameters are complete.
17571bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
17572 bool CheckParameterNames) {
17573 bool HasInvalidParm = false;
17574 for (ParmVarDecl *Param : Parameters) {
17575 assert(Param && "null in a parameter list");
17576 // C99 6.7.5.3p4: the parameters in a parameter type list in a
17577 // function declarator that is part of a function definition of
17578 // that function shall not have incomplete type.
17579 //
17580 // C++23 [dcl.fct.def.general]/p2
17581 // The type of a parameter [...] for a function definition
17582 // shall not be a (possibly cv-qualified) class type that is incomplete
17583 // or abstract within the function body unless the function is deleted.
17584 if (!Param->isInvalidDecl() &&
17585 (RequireCompleteType(Param->getLocation(), Param->getType(),
17586 diag::err_typecheck_decl_incomplete_type) ||
17587 RequireNonAbstractType(Param->getBeginLoc(), Param->getOriginalType(),
17588 diag::err_abstract_type_in_decl,
17589 AbstractParamType))) {
17590 Param->setInvalidDecl();
17591 HasInvalidParm = true;
17592 }
17593
17594 // C99 6.9.1p5: If the declarator includes a parameter type list, the
17595 // declaration of each parameter shall include an identifier.
17596 if (CheckParameterNames && Param->getIdentifier() == nullptr &&
17597 !Param->isImplicit() && !getLangOpts().CPlusPlus) {
17598 // Diagnose this as an extension in C17 and earlier.
17599 if (!getLangOpts().C23)
17600 Diag(Param->getLocation(), diag::ext_parameter_name_omitted_c23);
17601 }
17602
17603 // C99 6.7.5.3p12:
17604 // If the function declarator is not part of a definition of that
17605 // function, parameters may have incomplete type and may use the [*]
17606 // notation in their sequences of declarator specifiers to specify
17607 // variable length array types.
17608 QualType PType = Param->getOriginalType();
17609    // FIXME: This diagnostic should point to the '[*]' if source-location
17610 // information is added for it.
17611 diagnoseArrayStarInParamType(*this, PType, Param->getLocation());
17612
17613    // If the parameter is a C++ class type and it has to be destructed in the
17614 // callee function, declare the destructor so that it can be called by the
17615 // callee function. Do not perform any direct access check on the dtor here.
17616 if (!Param->isInvalidDecl()) {
17617 if (CXXRecordDecl *ClassDecl = Param->getType()->getAsCXXRecordDecl()) {
17618 if (!ClassDecl->isInvalidDecl() &&
17619 !ClassDecl->hasIrrelevantDestructor() &&
17620 !ClassDecl->isDependentContext() &&
17621 ClassDecl->isParamDestroyedInCallee()) {
17622 CXXDestructorDecl *Destructor = LookupDestructor(Class: ClassDecl);
17623 MarkFunctionReferenced(Loc: Param->getLocation(), Func: Destructor);
17624 DiagnoseUseOfDecl(D: Destructor, Locs: Param->getLocation());
17625 }
17626 }
17627 }
17628
17629 // Parameters with the pass_object_size attribute only need to be marked
17630 // constant at function definitions. Because we lack information about
17631 // whether we're on a declaration or definition when we're instantiating the
17632 // attribute, we need to check for constness here.
17633 if (const auto *Attr = Param->getAttr<PassObjectSizeAttr>())
17634 if (!Param->getType().isConstQualified())
17635 Diag(Param->getLocation(), diag::err_attribute_pointers_only)
17636 << Attr->getSpelling() << 1;
17637
17638 // Check for parameter names shadowing fields from the class.
17639 if (LangOpts.CPlusPlus && !Param->isInvalidDecl()) {
17640 // The owning context for the parameter should be the function, but we
17641 // want to see if this function's declaration context is a record.
17642 DeclContext *DC = Param->getDeclContext();
17643 if (DC && DC->isFunctionOrMethod()) {
17644 if (auto *RD = dyn_cast<CXXRecordDecl>(DC->getParent()))
17645 CheckShadowInheritedFields(Loc: Param->getLocation(), FieldName: Param->getDeclName(),
17646 RD: RD, /*DeclIsField*/ false);
17647 }
17648 }
17649
17650 if (!Param->isInvalidDecl() &&
17651 Param->getOriginalType()->isWebAssemblyTableType()) {
17652 Param->setInvalidDecl();
17653 HasInvalidParm = true;
17654 Diag(Param->getLocation(), diag::err_wasm_table_as_function_parameter);
17655 }
17656 }
17657
17658 return HasInvalidParm;
17659}
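// Illustrative examples (hypothetical user code) of parameter lists the
// checks above reject or warn about:
//
//   struct S;
//   void f(struct S s) {}  // error: parameter has incomplete type
//   void g(int) {}         // before C23: omitted parameter name diagnosed as
//                          // a C23 extension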
17660
17661 static std::optional<std::pair<CharUnits, CharUnits>>
17662 getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx);
17666
17667/// Compute the alignment and offset of the base class object given the
17668/// derived-to-base cast expression and the alignment and offset of the derived
17669/// class object.
17670static std::pair<CharUnits, CharUnits>
17671getDerivedToBaseAlignmentAndOffset(const CastExpr *CE, QualType DerivedType,
17672 CharUnits BaseAlignment, CharUnits Offset,
17673 ASTContext &Ctx) {
17674 for (auto PathI = CE->path_begin(), PathE = CE->path_end(); PathI != PathE;
17675 ++PathI) {
17676 const CXXBaseSpecifier *Base = *PathI;
17677 const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
17678 if (Base->isVirtual()) {
17679 // The complete object may have a lower alignment than the non-virtual
17680 // alignment of the base, in which case the base may be misaligned. Choose
17681 // the smaller of the non-virtual alignment and BaseAlignment, which is a
17682 // conservative lower bound of the complete object alignment.
17683 CharUnits NonVirtualAlignment =
17684 Ctx.getASTRecordLayout(BaseDecl).getNonVirtualAlignment();
17685 BaseAlignment = std::min(a: BaseAlignment, b: NonVirtualAlignment);
17686 Offset = CharUnits::Zero();
17687 } else {
17688 const ASTRecordLayout &RL =
17689 Ctx.getASTRecordLayout(DerivedType->getAsCXXRecordDecl());
17690 Offset += RL.getBaseClassOffset(Base: BaseDecl);
17691 }
17692 DerivedType = Base->getType();
17693 }
17694
17695 return std::make_pair(x&: BaseAlignment, y&: Offset);
17696}
17697
17698/// Compute the alignment and offset of a binary additive operator.
17699static std::optional<std::pair<CharUnits, CharUnits>>
17700getAlignmentAndOffsetFromBinAddOrSub(const Expr *PtrE, const Expr *IntE,
17701 bool IsSub, ASTContext &Ctx) {
17702 QualType PointeeType = PtrE->getType()->getPointeeType();
17703
17704 if (!PointeeType->isConstantSizeType())
17705 return std::nullopt;
17706
17707 auto P = getBaseAlignmentAndOffsetFromPtr(E: PtrE, Ctx);
17708
17709 if (!P)
17710 return std::nullopt;
17711
17712 CharUnits EltSize = Ctx.getTypeSizeInChars(T: PointeeType);
17713 if (std::optional<llvm::APSInt> IdxRes = IntE->getIntegerConstantExpr(Ctx)) {
17714 CharUnits Offset = EltSize * IdxRes->getExtValue();
17715 if (IsSub)
17716 Offset = -Offset;
17717 return std::make_pair(x&: P->first, y: P->second + Offset);
17718 }
17719
17720 // If the integer expression isn't a constant expression, compute the lower
17721 // bound of the alignment using the alignment and offset of the pointer
17722 // expression and the element size.
17723 return std::make_pair(
17724 x: P->first.alignmentAtOffset(offset: P->second).alignmentAtOffset(offset: EltSize),
17725 y: CharUnits::Zero());
17726}
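// Worked example (illustrative, assuming the CharUnits::alignmentAtOffset
// semantics used above): if the pointer expression has base alignment 16 with
// offset 4 and the element size is 4 bytes, an unknown index yields
// align(16 at offset 4) = 4, then align(4 at offset 4) = 4, so the result is
// 4-byte alignment at offset 0.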
17727
17728/// This helper function takes an lvalue expression and returns the alignment of
17729/// a VarDecl and a constant offset from the VarDecl.
17730 static std::optional<std::pair<CharUnits, CharUnits>>
17731 getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
17734 E = E->IgnoreParens();
17735 switch (E->getStmtClass()) {
17736 default:
17737 break;
17738 case Stmt::CStyleCastExprClass:
17739 case Stmt::CXXStaticCastExprClass:
17740 case Stmt::ImplicitCastExprClass: {
17741 auto *CE = cast<CastExpr>(Val: E);
17742 const Expr *From = CE->getSubExpr();
17743 switch (CE->getCastKind()) {
17744 default:
17745 break;
17746 case CK_NoOp:
17747 return getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
17748 case CK_UncheckedDerivedToBase:
17749 case CK_DerivedToBase: {
17750 auto P = getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
17751 if (!P)
17752 break;
17753 return getDerivedToBaseAlignmentAndOffset(CE, DerivedType: From->getType(), BaseAlignment: P->first,
17754 Offset: P->second, Ctx);
17755 }
17756 }
17757 break;
17758 }
17759 case Stmt::ArraySubscriptExprClass: {
17760 auto *ASE = cast<ArraySubscriptExpr>(Val: E);
17761 return getAlignmentAndOffsetFromBinAddOrSub(PtrE: ASE->getBase(), IntE: ASE->getIdx(),
17762 IsSub: false, Ctx);
17763 }
17764 case Stmt::DeclRefExprClass: {
17765 if (auto *VD = dyn_cast<VarDecl>(Val: cast<DeclRefExpr>(Val: E)->getDecl())) {
17766 // FIXME: If VD is captured by copy or is an escaping __block variable,
17767 // use the alignment of VD's type.
17768 if (!VD->getType()->isReferenceType()) {
17769 // Dependent alignment cannot be resolved -> bail out.
17770 if (VD->hasDependentAlignment())
17771 break;
17772 return std::make_pair(x: Ctx.getDeclAlign(VD), y: CharUnits::Zero());
17773 }
17774 if (VD->hasInit())
17775 return getBaseAlignmentAndOffsetFromLValue(E: VD->getInit(), Ctx);
17776 }
17777 break;
17778 }
17779 case Stmt::MemberExprClass: {
17780 auto *ME = cast<MemberExpr>(Val: E);
17781 auto *FD = dyn_cast<FieldDecl>(Val: ME->getMemberDecl());
17782 if (!FD || FD->getType()->isReferenceType() ||
17783 FD->getParent()->isInvalidDecl())
17784 break;
17785 std::optional<std::pair<CharUnits, CharUnits>> P;
17786 if (ME->isArrow())
17787 P = getBaseAlignmentAndOffsetFromPtr(E: ME->getBase(), Ctx);
17788 else
17789 P = getBaseAlignmentAndOffsetFromLValue(E: ME->getBase(), Ctx);
17790 if (!P)
17791 break;
17792 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(D: FD->getParent());
17793 uint64_t Offset = Layout.getFieldOffset(FieldNo: FD->getFieldIndex());
17794 return std::make_pair(x&: P->first,
17795 y: P->second + CharUnits::fromQuantity(Quantity: Offset));
17796 }
17797 case Stmt::UnaryOperatorClass: {
17798 auto *UO = cast<UnaryOperator>(Val: E);
17799 switch (UO->getOpcode()) {
17800 default:
17801 break;
17802 case UO_Deref:
17803 return getBaseAlignmentAndOffsetFromPtr(E: UO->getSubExpr(), Ctx);
17804 }
17805 break;
17806 }
17807 case Stmt::BinaryOperatorClass: {
17808 auto *BO = cast<BinaryOperator>(Val: E);
17809 auto Opcode = BO->getOpcode();
17810 switch (Opcode) {
17811 default:
17812 break;
17813 case BO_Comma:
17814 return getBaseAlignmentAndOffsetFromLValue(E: BO->getRHS(), Ctx);
17815 }
17816 break;
17817 }
17818 }
17819 return std::nullopt;
17820}
17821
/// This helper function takes a pointer expression and, if possible, returns
/// the alignment of the underlying VarDecl and a constant offset from it.
static std::optional<std::pair<CharUnits, CharUnits>>
getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) {
17829 E = E->IgnoreParens();
17830 switch (E->getStmtClass()) {
17831 default:
17832 break;
17833 case Stmt::CStyleCastExprClass:
17834 case Stmt::CXXStaticCastExprClass:
17835 case Stmt::ImplicitCastExprClass: {
17836 auto *CE = cast<CastExpr>(Val: E);
17837 const Expr *From = CE->getSubExpr();
17838 switch (CE->getCastKind()) {
17839 default:
17840 break;
17841 case CK_NoOp:
17842 return getBaseAlignmentAndOffsetFromPtr(E: From, Ctx);
17843 case CK_ArrayToPointerDecay:
17844 return getBaseAlignmentAndOffsetFromLValue(E: From, Ctx);
17845 case CK_UncheckedDerivedToBase:
17846 case CK_DerivedToBase: {
17847 auto P = getBaseAlignmentAndOffsetFromPtr(E: From, Ctx);
17848 if (!P)
17849 break;
17850 return getDerivedToBaseAlignmentAndOffset(
17851 CE, DerivedType: From->getType()->getPointeeType(), BaseAlignment: P->first, Offset: P->second, Ctx);
17852 }
17853 }
17854 break;
17855 }
17856 case Stmt::CXXThisExprClass: {
17857 auto *RD = E->getType()->getPointeeType()->getAsCXXRecordDecl();
17858 CharUnits Alignment = Ctx.getASTRecordLayout(RD).getNonVirtualAlignment();
17859 return std::make_pair(x&: Alignment, y: CharUnits::Zero());
17860 }
17861 case Stmt::UnaryOperatorClass: {
17862 auto *UO = cast<UnaryOperator>(Val: E);
17863 if (UO->getOpcode() == UO_AddrOf)
17864 return getBaseAlignmentAndOffsetFromLValue(E: UO->getSubExpr(), Ctx);
17865 break;
17866 }
17867 case Stmt::BinaryOperatorClass: {
17868 auto *BO = cast<BinaryOperator>(Val: E);
17869 auto Opcode = BO->getOpcode();
17870 switch (Opcode) {
17871 default:
17872 break;
17873 case BO_Add:
17874 case BO_Sub: {
17875 const Expr *LHS = BO->getLHS(), *RHS = BO->getRHS();
17876 if (Opcode == BO_Add && !RHS->getType()->isIntegralOrEnumerationType())
17877 std::swap(a&: LHS, b&: RHS);
17878 return getAlignmentAndOffsetFromBinAddOrSub(PtrE: LHS, IntE: RHS, IsSub: Opcode == BO_Sub,
17879 Ctx);
17880 }
17881 case BO_Comma:
17882 return getBaseAlignmentAndOffsetFromPtr(E: BO->getRHS(), Ctx);
17883 }
17884 break;
17885 }
17886 }
17887 return std::nullopt;
17888}
17889
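/// Compute a conservative estimate of the alignment of the pointer value
/// \p E: if the underlying VarDecl and a constant offset from it can be
/// determined, use those; otherwise fall back to the alignment of the
/// pointee type.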
17890static CharUnits getPresumedAlignmentOfPointer(const Expr *E, Sema &S) {
17891 // See if we can compute the alignment of a VarDecl and an offset from it.
17892 std::optional<std::pair<CharUnits, CharUnits>> P =
17893 getBaseAlignmentAndOffsetFromPtr(E, Ctx&: S.Context);
17894
17895 if (P)
17896 return P->first.alignmentAtOffset(offset: P->second);
17897
17898 // If that failed, return the type's alignment.
17899 return S.Context.getTypeAlignInChars(T: E->getType()->getPointeeType());
17900}
17901
17902/// CheckCastAlign - Implements -Wcast-align, which warns when a
17903/// pointer cast increases the alignment requirements.
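///
/// For example (assuming 'int' requires 4-byte alignment on the target):
/// \code
///   void f(char *p) {
///     int *q = (int *)p; // warns: cast from 'char *' to 'int *' increases
///                        // required alignment from 1 to 4
///   }
/// \endcode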
17904void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
17905 // This is actually a lot of work to potentially be doing on every
  // cast; don't do it if we're ignoring -Wcast-align (as is the default).
17907 if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin()))
17908 return;
17909
17910 // Ignore dependent types.
17911 if (T->isDependentType() || Op->getType()->isDependentType())
17912 return;
17913
17914 // Require that the destination be a pointer type.
17915 const PointerType *DestPtr = T->getAs<PointerType>();
17916 if (!DestPtr) return;
17917
17918 // If the destination has alignment 1, we're done.
17919 QualType DestPointee = DestPtr->getPointeeType();
17920 if (DestPointee->isIncompleteType()) return;
17921 CharUnits DestAlign = Context.getTypeAlignInChars(T: DestPointee);
17922 if (DestAlign.isOne()) return;
17923
17924 // Require that the source be a pointer type.
17925 const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
17926 if (!SrcPtr) return;
17927 QualType SrcPointee = SrcPtr->getPointeeType();
17928
17929 // Explicitly allow casts from cv void*. We already implicitly
17930 // allowed casts to cv void*, since they have alignment 1.
17931 // Also allow casts involving incomplete types, which implicitly
17932 // includes 'void'.
17933 if (SrcPointee->isIncompleteType()) return;
17934
17935 CharUnits SrcAlign = getPresumedAlignmentOfPointer(E: Op, S&: *this);
17936
17937 if (SrcAlign >= DestAlign) return;
17938
17939 Diag(TRange.getBegin(), diag::warn_cast_align)
17940 << Op->getType() << T
17941 << static_cast<unsigned>(SrcAlign.getQuantity())
17942 << static_cast<unsigned>(DestAlign.getQuantity())
17943 << TRange << Op->getSourceRange();
17944}
17945
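/// Check an array access (or equivalent pointer arithmetic) whose index is a
/// compile-time constant, and warn when the access is known to be out of
/// bounds of a constant-size array, or past the addressable range for
/// flexible array members and other unbounded arrays. For example:
/// \code
///   int a[4];
///   a[4] = 0; // warns: array index 4 is past the end of the array
/// \endcode
/// \p AllowOnePastEnd permits forming (but not necessarily dereferencing) the
/// one-past-the-end address.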
17946void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
17947 const ArraySubscriptExpr *ASE,
17948 bool AllowOnePastEnd, bool IndexNegated) {
17949 // Already diagnosed by the constant evaluator.
17950 if (isConstantEvaluatedContext())
17951 return;
17952
17953 IndexExpr = IndexExpr->IgnoreParenImpCasts();
17954 if (IndexExpr->isValueDependent())
17955 return;
17956
17957 const Type *EffectiveType =
17958 BaseExpr->getType()->getPointeeOrArrayElementType();
17959 BaseExpr = BaseExpr->IgnoreParenCasts();
17960 const ConstantArrayType *ArrayTy =
17961 Context.getAsConstantArrayType(T: BaseExpr->getType());
17962
17963 LangOptions::StrictFlexArraysLevelKind
17964 StrictFlexArraysLevel = getLangOpts().getStrictFlexArraysLevel();
17965
17966 const Type *BaseType =
17967 ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
17968 bool IsUnboundedArray =
17969 BaseType == nullptr || BaseExpr->isFlexibleArrayMemberLike(
17970 Context, StrictFlexArraysLevel,
17971 /*IgnoreTemplateOrMacroSubstitution=*/true);
17972 if (EffectiveType->isDependentType() ||
17973 (!IsUnboundedArray && BaseType->isDependentType()))
17974 return;
17975
17976 Expr::EvalResult Result;
17977 if (!IndexExpr->EvaluateAsInt(Result, Ctx: Context, AllowSideEffects: Expr::SE_AllowSideEffects))
17978 return;
17979
17980 llvm::APSInt index = Result.Val.getInt();
17981 if (IndexNegated) {
17982 index.setIsUnsigned(false);
17983 index = -index;
17984 }
17985
17986 if (IsUnboundedArray) {
17987 if (EffectiveType->isFunctionType())
17988 return;
17989 if (index.isUnsigned() || !index.isNegative()) {
17990 const auto &ASTC = getASTContext();
17991 unsigned AddrBits = ASTC.getTargetInfo().getPointerWidth(
17992 AddrSpace: EffectiveType->getCanonicalTypeInternal().getAddressSpace());
17993 if (index.getBitWidth() < AddrBits)
17994 index = index.zext(width: AddrBits);
17995 std::optional<CharUnits> ElemCharUnits =
17996 ASTC.getTypeSizeInCharsIfKnown(Ty: EffectiveType);
17997 // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
17998 // pointer) bounds-checking isn't meaningful.
17999 if (!ElemCharUnits || ElemCharUnits->isZero())
18000 return;
18001 llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
18002 // If index has more active bits than address space, we already know
18003 // we have a bounds violation to warn about. Otherwise, compute
18004 // address of (index + 1)th element, and warn about bounds violation
18005 // only if that address exceeds address space.
18006 if (index.getActiveBits() <= AddrBits) {
18007 bool Overflow;
18008 llvm::APInt Product(index);
18009 Product += 1;
18010 Product = Product.umul_ov(RHS: ElemBytes, Overflow);
18011 if (!Overflow && Product.getActiveBits() <= AddrBits)
18012 return;
18013 }
18014
18015 // Need to compute max possible elements in address space, since that
18016 // is included in diag message.
18017 llvm::APInt MaxElems = llvm::APInt::getMaxValue(numBits: AddrBits);
18018 MaxElems = MaxElems.zext(width: std::max(a: AddrBits + 1, b: ElemBytes.getBitWidth()));
18019 MaxElems += 1;
18020 ElemBytes = ElemBytes.zextOrTrunc(width: MaxElems.getBitWidth());
18021 MaxElems = MaxElems.udiv(RHS: ElemBytes);
18022
18023 unsigned DiagID =
18024 ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
18025 : diag::warn_ptr_arith_exceeds_max_addressable_bounds;
18026
18027 // Diag message shows element size in bits and in "bytes" (platform-
18028 // dependent CharUnits)
18029 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
18030 PDiag(DiagID)
18031 << toString(I: index, Radix: 10, Signed: true) << AddrBits
18032 << (unsigned)ASTC.toBits(CharSize: *ElemCharUnits)
18033 << toString(I: ElemBytes, Radix: 10, Signed: false)
18034 << toString(I: MaxElems, Radix: 10, Signed: false)
18035 << (unsigned)MaxElems.getLimitedValue(Limit: ~0U)
18036 << IndexExpr->getSourceRange());
18037
18038 const NamedDecl *ND = nullptr;
18039 // Try harder to find a NamedDecl to point at in the note.
18040 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: BaseExpr))
18041 BaseExpr = ASE->getBase()->IgnoreParenCasts();
18042 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: BaseExpr))
18043 ND = DRE->getDecl();
18044 if (const auto *ME = dyn_cast<MemberExpr>(Val: BaseExpr))
18045 ND = ME->getMemberDecl();
18046
18047 if (ND)
18048 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
18049 PDiag(diag::note_array_declared_here) << ND);
18050 }
18051 return;
18052 }
18053
18054 if (index.isUnsigned() || !index.isNegative()) {
18055 // It is possible that the type of the base expression after
18056 // IgnoreParenCasts is incomplete, even though the type of the base
18057 // expression before IgnoreParenCasts is complete (see PR39746 for an
18058 // example). In this case we have no information about whether the array
18059 // access exceeds the array bounds. However we can still diagnose an array
18060 // access which precedes the array bounds.
18061 if (BaseType->isIncompleteType())
18062 return;
18063
18064 llvm::APInt size = ArrayTy->getSize();
18065
18066 if (BaseType != EffectiveType) {
18067 // Make sure we're comparing apples to apples when comparing index to
18068 // size.
18069 uint64_t ptrarith_typesize = Context.getTypeSize(T: EffectiveType);
18070 uint64_t array_typesize = Context.getTypeSize(T: BaseType);
18071
18072 // Handle ptrarith_typesize being zero, such as when casting to void*.
18073 // Use the size in bits (what "getTypeSize()" returns) rather than bytes.
18074 if (!ptrarith_typesize)
18075 ptrarith_typesize = Context.getCharWidth();
18076
18077 if (ptrarith_typesize != array_typesize) {
18078 // There's a cast to a different size type involved.
18079 uint64_t ratio = array_typesize / ptrarith_typesize;
18080
18081 // TODO: Be smarter about handling cases where array_typesize is not a
18082 // multiple of ptrarith_typesize.
18083 if (ptrarith_typesize * ratio == array_typesize)
18084 size *= llvm::APInt(size.getBitWidth(), ratio);
18085 }
18086 }
18087
18088 if (size.getBitWidth() > index.getBitWidth())
18089 index = index.zext(width: size.getBitWidth());
18090 else if (size.getBitWidth() < index.getBitWidth())
18091 size = size.zext(width: index.getBitWidth());
18092
18093 // For array subscripting the index must be less than size, but for pointer
18094 // arithmetic also allow the index (offset) to be equal to size since
18095 // computing the next address after the end of the array is legal and
18096 // commonly done e.g. in C++ iterators and range-based for loops.
18097 if (AllowOnePastEnd ? index.ule(RHS: size) : index.ult(RHS: size))
18098 return;
18099
18100 // Suppress the warning if the subscript expression (as identified by the
18101 // ']' location) and the index expression are both from macro expansions
18102 // within a system header.
18103 if (ASE) {
18104 SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
18105 Loc: ASE->getRBracketLoc());
18106 if (SourceMgr.isInSystemHeader(Loc: RBracketLoc)) {
18107 SourceLocation IndexLoc =
18108 SourceMgr.getSpellingLoc(Loc: IndexExpr->getBeginLoc());
18109 if (SourceMgr.isWrittenInSameFile(Loc1: RBracketLoc, Loc2: IndexLoc))
18110 return;
18111 }
18112 }
18113
18114 unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
18115 : diag::warn_ptr_arith_exceeds_bounds;
18116 unsigned CastMsg = (!ASE || BaseType == EffectiveType) ? 0 : 1;
18117 QualType CastMsgTy = ASE ? ASE->getLHS()->getType() : QualType();
18118
18119 DiagRuntimeBehavior(
18120 BaseExpr->getBeginLoc(), BaseExpr,
18121 PDiag(DiagID) << toString(I: index, Radix: 10, Signed: true) << ArrayTy->desugar()
18122 << CastMsg << CastMsgTy << IndexExpr->getSourceRange());
18123 } else {
18124 unsigned DiagID = diag::warn_array_index_precedes_bounds;
18125 if (!ASE) {
18126 DiagID = diag::warn_ptr_arith_precedes_bounds;
18127 if (index.isNegative()) index = -index;
18128 }
18129
18130 DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
18131 PDiag(DiagID) << toString(I: index, Radix: 10, Signed: true)
18132 << IndexExpr->getSourceRange());
18133 }
18134
18135 const NamedDecl *ND = nullptr;
18136 // Try harder to find a NamedDecl to point at in the note.
18137 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Val: BaseExpr))
18138 BaseExpr = ASE->getBase()->IgnoreParenCasts();
18139 if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: BaseExpr))
18140 ND = DRE->getDecl();
18141 if (const auto *ME = dyn_cast<MemberExpr>(Val: BaseExpr))
18142 ND = ME->getMemberDecl();
18143
18144 if (ND)
18145 DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
18146 PDiag(diag::note_array_declared_here) << ND);
18147}
18148
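/// Recursively walk \p expr looking for array subscripts (and OpenMP array
/// sections) to check, tracking '&' and '*' operators to decide whether a
/// one-past-the-end index is acceptable.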
18149void Sema::CheckArrayAccess(const Expr *expr) {
18150 int AllowOnePastEnd = 0;
18151 while (expr) {
18152 expr = expr->IgnoreParenImpCasts();
18153 switch (expr->getStmtClass()) {
18154 case Stmt::ArraySubscriptExprClass: {
18155 const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(Val: expr);
18156 CheckArrayAccess(BaseExpr: ASE->getBase(), IndexExpr: ASE->getIdx(), ASE,
18157 AllowOnePastEnd: AllowOnePastEnd > 0);
18158 expr = ASE->getBase();
18159 break;
18160 }
18161 case Stmt::MemberExprClass: {
18162 expr = cast<MemberExpr>(Val: expr)->getBase();
18163 break;
18164 }
18165 case Stmt::OMPArraySectionExprClass: {
18166 const OMPArraySectionExpr *ASE = cast<OMPArraySectionExpr>(Val: expr);
18167 if (ASE->getLowerBound())
18168 CheckArrayAccess(BaseExpr: ASE->getBase(), IndexExpr: ASE->getLowerBound(),
18169 /*ASE=*/nullptr, AllowOnePastEnd: AllowOnePastEnd > 0);
18170 return;
18171 }
18172 case Stmt::UnaryOperatorClass: {
18173 // Only unwrap the * and & unary operators
18174 const UnaryOperator *UO = cast<UnaryOperator>(Val: expr);
18175 expr = UO->getSubExpr();
18176 switch (UO->getOpcode()) {
18177 case UO_AddrOf:
18178 AllowOnePastEnd++;
18179 break;
18180 case UO_Deref:
18181 AllowOnePastEnd--;
18182 break;
18183 default:
18184 return;
18185 }
18186 break;
18187 }
18188 case Stmt::ConditionalOperatorClass: {
18189 const ConditionalOperator *cond = cast<ConditionalOperator>(Val: expr);
18190 if (const Expr *lhs = cond->getLHS())
18191 CheckArrayAccess(expr: lhs);
18192 if (const Expr *rhs = cond->getRHS())
18193 CheckArrayAccess(expr: rhs);
18194 return;
18195 }
18196 case Stmt::CXXOperatorCallExprClass: {
18197 const auto *OCE = cast<CXXOperatorCallExpr>(Val: expr);
18198 for (const auto *Arg : OCE->arguments())
18199 CheckArrayAccess(Arg);
18200 return;
18201 }
18202 default:
18203 return;
18204 }
18205 }
18206}
18207
18208//===--- CHECK: Objective-C retain cycles ----------------------------------//
18209
18210namespace {
18211
18212struct RetainCycleOwner {
18213 VarDecl *Variable = nullptr;
18214 SourceRange Range;
18215 SourceLocation Loc;
18216 bool Indirect = false;
18217
18218 RetainCycleOwner() = default;
18219
18220 void setLocsFrom(Expr *e) {
18221 Loc = e->getExprLoc();
18222 Range = e->getSourceRange();
18223 }
18224};
18225
18226} // namespace
18227
18228/// Consider whether capturing the given variable can possibly lead to
18229/// a retain cycle.
18230static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
18231 // In ARC, it's captured strongly iff the variable has __strong
18232 // lifetime. In MRR, it's captured strongly if the variable is
18233 // __block and has an appropriate type.
18234 if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
18235 return false;
18236
18237 owner.Variable = var;
18238 if (ref)
18239 owner.setLocsFrom(ref);
18240 return true;
18241}
18242
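/// Try to find a variable that the given expression is strongly owned by,
/// looking through casts, strong ivars, member accesses, and retaining
/// property references. Returns true and fills in \p owner on success.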
18243static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
18244 while (true) {
18245 e = e->IgnoreParens();
18246 if (CastExpr *cast = dyn_cast<CastExpr>(Val: e)) {
18247 switch (cast->getCastKind()) {
18248 case CK_BitCast:
18249 case CK_LValueBitCast:
18250 case CK_LValueToRValue:
18251 case CK_ARCReclaimReturnedObject:
18252 e = cast->getSubExpr();
18253 continue;
18254
18255 default:
18256 return false;
18257 }
18258 }
18259
18260 if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(Val: e)) {
18261 ObjCIvarDecl *ivar = ref->getDecl();
18262 if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
18263 return false;
18264
18265 // Try to find a retain cycle in the base.
18266 if (!findRetainCycleOwner(S, e: ref->getBase(), owner))
18267 return false;
18268
18269 if (ref->isFreeIvar()) owner.setLocsFrom(ref);
18270 owner.Indirect = true;
18271 return true;
18272 }
18273
18274 if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(Val: e)) {
18275 VarDecl *var = dyn_cast<VarDecl>(Val: ref->getDecl());
18276 if (!var) return false;
18277 return considerVariable(var, ref, owner);
18278 }
18279
18280 if (MemberExpr *member = dyn_cast<MemberExpr>(Val: e)) {
18281 if (member->isArrow()) return false;
18282
18283 // Don't count this as an indirect ownership.
18284 e = member->getBase();
18285 continue;
18286 }
18287
18288 if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(Val: e)) {
18289 // Only pay attention to pseudo-objects on property references.
18290 ObjCPropertyRefExpr *pre
18291 = dyn_cast<ObjCPropertyRefExpr>(Val: pseudo->getSyntacticForm()
18292 ->IgnoreParens());
18293 if (!pre) return false;
18294 if (pre->isImplicitProperty()) return false;
18295 ObjCPropertyDecl *property = pre->getExplicitProperty();
18296 if (!property->isRetaining() &&
18297 !(property->getPropertyIvarDecl() &&
18298 property->getPropertyIvarDecl()->getType()
18299 .getObjCLifetime() == Qualifiers::OCL_Strong))
18300 return false;
18301
18302 owner.Indirect = true;
18303 if (pre->isSuperReceiver()) {
18304 owner.Variable = S.getCurMethodDecl()->getSelfDecl();
18305 if (!owner.Variable)
18306 return false;
18307 owner.Loc = pre->getLocation();
18308 owner.Range = pre->getSourceRange();
18309 return true;
18310 }
18311 e = const_cast<Expr*>(cast<OpaqueValueExpr>(Val: pre->getBase())
18312 ->getSourceExpr());
18313 continue;
18314 }
18315
18316 // Array ivars?
18317
18318 return false;
18319 }
18320}
18321
18322namespace {
18323
18324 struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
18325 VarDecl *Variable;
18326 Expr *Capturer = nullptr;
18327 bool VarWillBeReased = false;
18328
18329 FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
18330 : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
18331 Variable(variable) {}
18332
18333 void VisitDeclRefExpr(DeclRefExpr *ref) {
18334 if (ref->getDecl() == Variable && !Capturer)
18335 Capturer = ref;
18336 }
18337
18338 void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
18339 if (Capturer) return;
18340 Visit(ref->getBase());
18341 if (Capturer && ref->isFreeIvar())
18342 Capturer = ref;
18343 }
18344
18345 void VisitBlockExpr(BlockExpr *block) {
18346 // Look inside nested blocks
18347 if (block->getBlockDecl()->capturesVariable(var: Variable))
18348 Visit(S: block->getBlockDecl()->getBody());
18349 }
18350
18351 void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) {
18352 if (Capturer) return;
18353 if (OVE->getSourceExpr())
18354 Visit(OVE->getSourceExpr());
18355 }
18356
18357 void VisitBinaryOperator(BinaryOperator *BinOp) {
18358 if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign)
18359 return;
18360 Expr *LHS = BinOp->getLHS();
18361 if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(Val: LHS)) {
18362 if (DRE->getDecl() != Variable)
18363 return;
18364 if (Expr *RHS = BinOp->getRHS()) {
18365 RHS = RHS->IgnoreParenCasts();
18366 std::optional<llvm::APSInt> Value;
18367 VarWillBeReased =
18368 (RHS && (Value = RHS->getIntegerConstantExpr(Ctx: Context)) &&
18369 *Value == 0);
18370 }
18371 }
18372 }
18373 };
18374
18375} // namespace
18376
18377/// Check whether the given argument is a block which captures a
18378/// variable.
18379static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
18380 assert(owner.Variable && owner.Loc.isValid());
18381
18382 e = e->IgnoreParenCasts();
18383
18384 // Look through [^{...} copy] and Block_copy(^{...}).
18385 if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Val: e)) {
18386 Selector Cmd = ME->getSelector();
18387 if (Cmd.isUnarySelector() && Cmd.getNameForSlot(argIndex: 0) == "copy") {
18388 e = ME->getInstanceReceiver();
18389 if (!e)
18390 return nullptr;
18391 e = e->IgnoreParenCasts();
18392 }
18393 } else if (CallExpr *CE = dyn_cast<CallExpr>(Val: e)) {
18394 if (CE->getNumArgs() == 1) {
18395 FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(Val: CE->getCalleeDecl());
18396 if (Fn) {
18397 const IdentifierInfo *FnI = Fn->getIdentifier();
18398 if (FnI && FnI->isStr(Str: "_Block_copy")) {
18399 e = CE->getArg(Arg: 0)->IgnoreParenCasts();
18400 }
18401 }
18402 }
18403 }
18404
18405 BlockExpr *block = dyn_cast<BlockExpr>(Val: e);
18406 if (!block || !block->getBlockDecl()->capturesVariable(var: owner.Variable))
18407 return nullptr;
18408
18409 FindCaptureVisitor visitor(S.Context, owner.Variable);
18410 visitor.Visit(S: block->getBlockDecl()->getBody());
18411 return visitor.VarWillBeReased ? nullptr : visitor.Capturer;
18412}
18413
18414static void diagnoseRetainCycle(Sema &S, Expr *capturer,
18415 RetainCycleOwner &owner) {
18416 assert(capturer);
18417 assert(owner.Variable && owner.Loc.isValid());
18418
18419 S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
18420 << owner.Variable << capturer->getSourceRange();
18421 S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
18422 << owner.Indirect << owner.Range;
18423}
18424
18425/// Check for a keyword selector that starts with the word 'add' or
18426/// 'set'.
18427static bool isSetterLikeSelector(Selector sel) {
18428 if (sel.isUnarySelector()) return false;
18429
18430 StringRef str = sel.getNameForSlot(argIndex: 0);
18431 str = str.ltrim(Char: '_');
18432 if (str.starts_with(Prefix: "set"))
18433 str = str.substr(Start: 3);
18434 else if (str.starts_with(Prefix: "add")) {
18435 // Specially allow 'addOperationWithBlock:'.
18436 if (sel.getNumArgs() == 1 && str.starts_with(Prefix: "addOperationWithBlock"))
18437 return false;
18438 str = str.substr(Start: 3);
18439 } else
18440 return false;
18441
18442 if (str.empty()) return true;
18443 return !isLowercase(c: str.front());
18444}
18445
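/// If the message is a mutation of an NSMutableArray that stores one of its
/// arguments into the array (e.g. -addObject:), return the index of that
/// argument; otherwise return std::nullopt.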
18446static std::optional<int>
18447GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
18448 bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass(
18449 InterfaceDecl: Message->getReceiverInterface(),
18450 NSClassKind: NSAPI::ClassId_NSMutableArray);
18451 if (!IsMutableArray) {
18452 return std::nullopt;
18453 }
18454
18455 Selector Sel = Message->getSelector();
18456
18457 std::optional<NSAPI::NSArrayMethodKind> MKOpt =
18458 S.NSAPIObj->getNSArrayMethodKind(Sel);
18459 if (!MKOpt) {
18460 return std::nullopt;
18461 }
18462
18463 NSAPI::NSArrayMethodKind MK = *MKOpt;
18464
18465 switch (MK) {
18466 case NSAPI::NSMutableArr_addObject:
18467 case NSAPI::NSMutableArr_insertObjectAtIndex:
18468 case NSAPI::NSMutableArr_setObjectAtIndexedSubscript:
18469 return 0;
18470 case NSAPI::NSMutableArr_replaceObjectAtIndex:
18471 return 1;
18472
18473 default:
18474 return std::nullopt;
18475 }
18476
18477 return std::nullopt;
18478}
18479
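/// If the message is a mutation of an NSMutableDictionary that stores one of
/// its arguments into the dictionary (e.g. -setObject:forKey:), return the
/// index of the stored object argument; otherwise return std::nullopt.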
18480static std::optional<int>
18481GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) {
18482 bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass(
18483 InterfaceDecl: Message->getReceiverInterface(),
18484 NSClassKind: NSAPI::ClassId_NSMutableDictionary);
18485 if (!IsMutableDictionary) {
18486 return std::nullopt;
18487 }
18488
18489 Selector Sel = Message->getSelector();
18490
18491 std::optional<NSAPI::NSDictionaryMethodKind> MKOpt =
18492 S.NSAPIObj->getNSDictionaryMethodKind(Sel);
18493 if (!MKOpt) {
18494 return std::nullopt;
18495 }
18496
18497 NSAPI::NSDictionaryMethodKind MK = *MKOpt;
18498
18499 switch (MK) {
18500 case NSAPI::NSMutableDict_setObjectForKey:
18501 case NSAPI::NSMutableDict_setValueForKey:
18502 case NSAPI::NSMutableDict_setObjectForKeyedSubscript:
18503 return 0;
18504
18505 default:
18506 return std::nullopt;
18507 }
18508
18509 return std::nullopt;
18510}
18511
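/// If the message is a mutation of an NSMutableSet or NSMutableOrderedSet
/// that stores one of its arguments into the collection, return the index of
/// that argument; otherwise return std::nullopt.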
18512static std::optional<int> GetNSSetArgumentIndex(Sema &S,
18513 ObjCMessageExpr *Message) {
18514 bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass(
18515 InterfaceDecl: Message->getReceiverInterface(),
18516 NSClassKind: NSAPI::ClassId_NSMutableSet);
18517
18518 bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass(
18519 InterfaceDecl: Message->getReceiverInterface(),
18520 NSClassKind: NSAPI::ClassId_NSMutableOrderedSet);
18521 if (!IsMutableSet && !IsMutableOrderedSet) {
18522 return std::nullopt;
18523 }
18524
18525 Selector Sel = Message->getSelector();
18526
18527 std::optional<NSAPI::NSSetMethodKind> MKOpt =
18528 S.NSAPIObj->getNSSetMethodKind(Sel);
18529 if (!MKOpt) {
18530 return std::nullopt;
18531 }
18532
18533 NSAPI::NSSetMethodKind MK = *MKOpt;
18534
18535 switch (MK) {
18536 case NSAPI::NSMutableSet_addObject:
18537 case NSAPI::NSOrderedSet_setObjectAtIndex:
18538 case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript:
18539 case NSAPI::NSOrderedSet_insertObjectAtIndex:
18540 return 0;
18541 case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject:
18542 return 1;
18543 }
18544
18545 return std::nullopt;
18546}
18547
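/// Warn when a mutable Objective-C container is inserted into itself, which
/// creates a circular reference the container cannot break. For example:
/// \code
///   NSMutableArray *array = [NSMutableArray array];
///   [array addObject:array]; // warns: adding 'array' to 'array' might cause
///                            // circular dependency in container
/// \endcode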
18548void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) {
18549 if (!Message->isInstanceMessage()) {
18550 return;
18551 }
18552
18553 std::optional<int> ArgOpt;
18554
18555 if (!(ArgOpt = GetNSMutableArrayArgumentIndex(S&: *this, Message)) &&
18556 !(ArgOpt = GetNSMutableDictionaryArgumentIndex(S&: *this, Message)) &&
18557 !(ArgOpt = GetNSSetArgumentIndex(S&: *this, Message))) {
18558 return;
18559 }
18560
18561 int ArgIndex = *ArgOpt;
18562
18563 Expr *Arg = Message->getArg(Arg: ArgIndex)->IgnoreImpCasts();
18564 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Val: Arg)) {
18565 Arg = OE->getSourceExpr()->IgnoreImpCasts();
18566 }
18567
18568 if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
18569 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Val: Arg)) {
18570 if (ArgRE->isObjCSelfExpr()) {
18571 Diag(Message->getSourceRange().getBegin(),
18572 diag::warn_objc_circular_container)
18573 << ArgRE->getDecl() << StringRef("'super'");
18574 }
18575 }
18576 } else {
18577 Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts();
18578
18579 if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Val: Receiver)) {
18580 Receiver = OE->getSourceExpr()->IgnoreImpCasts();
18581 }
18582
18583 if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Val: Receiver)) {
18584 if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Val: Arg)) {
18585 if (ReceiverRE->getDecl() == ArgRE->getDecl()) {
18586 ValueDecl *Decl = ReceiverRE->getDecl();
18587 Diag(Message->getSourceRange().getBegin(),
18588 diag::warn_objc_circular_container)
18589 << Decl << Decl;
18590 if (!ArgRE->isObjCSelfExpr()) {
18591 Diag(Decl->getLocation(),
18592 diag::note_objc_circular_container_declared_here)
18593 << Decl;
18594 }
18595 }
18596 }
18597 } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Val: Receiver)) {
18598 if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Val: Arg)) {
18599 if (IvarRE->getDecl() == IvarArgRE->getDecl()) {
18600 ObjCIvarDecl *Decl = IvarRE->getDecl();
18601 Diag(Message->getSourceRange().getBegin(),
18602 diag::warn_objc_circular_container)
18603 << Decl << Decl;
18604 Diag(Decl->getLocation(),
18605 diag::note_objc_circular_container_declared_here)
18606 << Decl;
18607 }
18608 }
18609 }
18610 }
18611}
18612
18613/// Check a message send to see if it's likely to cause a retain cycle.
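///
/// For example, under ARC (any setter-like 'set...'/'add...' selector is
/// considered):
/// \code
///   [self setCompletionHandler:^{ [self doSomething]; }];
///   // warns: capturing 'self' strongly in this block is likely to lead to a
///   // retain cycle
/// \endcode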
18614void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
18615 // Only check instance methods whose selector looks like a setter.
18616 if (!msg->isInstanceMessage() || !isSetterLikeSelector(sel: msg->getSelector()))
18617 return;
18618
18619 // Try to find a variable that the receiver is strongly owned by.
18620 RetainCycleOwner owner;
18621 if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
18622 if (!findRetainCycleOwner(S&: *this, e: msg->getInstanceReceiver(), owner))
18623 return;
18624 } else {
18625 assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
18626 owner.Variable = getCurMethodDecl()->getSelfDecl();
18627 owner.Loc = msg->getSuperLoc();
18628 owner.Range = msg->getSuperLoc();
18629 }
18630
18631 // Check whether the receiver is captured by any of the arguments.
18632 const ObjCMethodDecl *MD = msg->getMethodDecl();
18633 for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) {
18634 if (Expr *capturer = findCapturingExpr(S&: *this, e: msg->getArg(Arg: i), owner)) {
18635 // noescape blocks should not be retained by the method.
18636 if (MD && MD->parameters()[i]->hasAttr<NoEscapeAttr>())
18637 continue;
18638 return diagnoseRetainCycle(S&: *this, capturer, owner);
18639 }
18640 }
18641}
18642
18643/// Check a property assign to see if it's likely to cause a retain cycle.
18644void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
18645 RetainCycleOwner owner;
18646 if (!findRetainCycleOwner(S&: *this, e: receiver, owner))
18647 return;
18648
18649 if (Expr *capturer = findCapturingExpr(S&: *this, e: argument, owner))
18650 diagnoseRetainCycle(S&: *this, capturer, owner);
18651}
18652
18653void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) {
18654 RetainCycleOwner Owner;
18655 if (!considerVariable(var: Var, /*DeclRefExpr=*/ref: nullptr, owner&: Owner))
18656 return;
18657
18658 // Because we don't have an expression for the variable, we have to set the
18659 // location explicitly here.
18660 Owner.Loc = Var->getLocation();
18661 Owner.Range = Var->getSourceRange();
18662
18663 if (Expr *Capturer = findCapturingExpr(S&: *this, e: Init, owner&: Owner))
18664 diagnoseRetainCycle(S&: *this, capturer: Capturer, owner&: Owner);
18665}
18666
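/// Warn when an Objective-C object literal (other than a string literal) is
/// assigned into a __weak l-value; the temporary object is released right
/// after the assignment, so the weak reference is immediately zeroed.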
18667static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc,
18668 Expr *RHS, bool isProperty) {
  // Check if RHS is an Objective-C object literal, which can also get
18670 // immediately zapped in a weak reference. Note that we explicitly
18671 // allow ObjCStringLiterals, since those are designed to never really die.
18672 RHS = RHS->IgnoreParenImpCasts();
18673
  // This enum needs to match the 'select' in warn_arc_literal_assign
  // (off-by-1).
18676 Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(FromE: RHS);
18677 if (Kind == Sema::LK_String || Kind == Sema::LK_None)
18678 return false;
18679
18680 S.Diag(Loc, diag::warn_arc_literal_assign)
18681 << (unsigned) Kind
18682 << (isProperty ? 0 : 1)
18683 << RHS->getSourceRange();
18684
18685 return true;
18686}
18687
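/// Warn when a +1 (retained) object is assigned into a __weak or
/// __unsafe_unretained l-value; for __weak l-values also check object
/// literal assignments.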
18688static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc,
18689 Qualifiers::ObjCLifetime LT,
18690 Expr *RHS, bool isProperty) {
  // Look through any implicit casts to find the ARC-specific
  // CK_ARCConsumeObject cast, if present.
18692 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(Val: RHS)) {
18693 if (cast->getCastKind() == CK_ARCConsumeObject) {
18694 S.Diag(Loc, diag::warn_arc_retained_assign)
18695 << (LT == Qualifiers::OCL_ExplicitNone)
18696 << (isProperty ? 0 : 1)
18697 << RHS->getSourceRange();
18698 return true;
18699 }
18700 RHS = cast->getSubExpr();
18701 }
18702
18703 if (LT == Qualifiers::OCL_Weak &&
18704 checkUnsafeAssignLiteral(S, Loc, RHS, isProperty))
18705 return true;
18706
18707 return false;
18708}
18709
18710bool Sema::checkUnsafeAssigns(SourceLocation Loc,
18711 QualType LHS, Expr *RHS) {
18712 Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
18713
18714 if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
18715 return false;
18716
18717 if (checkUnsafeAssignObject(S&: *this, Loc, LT, RHS, isProperty: false))
18718 return true;
18719
18720 return false;
18721}
18722
18723void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
18724 Expr *LHS, Expr *RHS) {
18725 QualType LHSType;
  // The type of a PropertyRef on the LHS must be obtained directly from its
  // declaration, since the expression itself has a pseudo-object type.
18728 ObjCPropertyRefExpr *PRE
18729 = dyn_cast<ObjCPropertyRefExpr>(Val: LHS->IgnoreParens());
18730 if (PRE && !PRE->isImplicitProperty()) {
18731 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
18732 if (PD)
18733 LHSType = PD->getType();
18734 }
18735
18736 if (LHSType.isNull())
18737 LHSType = LHS->getType();
18738
18739 Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
18740
18741 if (LT == Qualifiers::OCL_Weak) {
18742 if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc))
18743 getCurFunction()->markSafeWeakUse(E: LHS);
18744 }
18745
18746 if (checkUnsafeAssigns(Loc, LHS: LHSType, RHS))
18747 return;
18748
  // FIXME: Check for other lifetimes.
18750 if (LT != Qualifiers::OCL_None)
18751 return;
18752
18753 if (PRE) {
18754 if (PRE->isImplicitProperty())
18755 return;
18756 const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
18757 if (!PD)
18758 return;
18759
18760 unsigned Attributes = PD->getPropertyAttributes();
18761 if (Attributes & ObjCPropertyAttribute::kind_assign) {
      // When the 'assign' attribute was not explicitly written by the user,
      // ignore it and rely on the property type itself for lifetime info.
18765 unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
18766 if (!(AsWrittenAttr & ObjCPropertyAttribute::kind_assign) &&
18767 LHSType->isObjCRetainableType())
18768 return;
18769
18770 while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(Val: RHS)) {
18771 if (cast->getCastKind() == CK_ARCConsumeObject) {
18772 Diag(Loc, diag::warn_arc_retained_property_assign)
18773 << RHS->getSourceRange();
18774 return;
18775 }
18776 RHS = cast->getSubExpr();
18777 }
18778 } else if (Attributes & ObjCPropertyAttribute::kind_weak) {
18779 if (checkUnsafeAssignObject(S&: *this, Loc, LT: Qualifiers::OCL_Weak, RHS, isProperty: true))
18780 return;
18781 }
18782 }
18783}
18784
18785//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
18786
18787static bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
18788 SourceLocation StmtLoc,
18789 const NullStmt *Body) {
  // Do not warn if the body is a macro that expands to nothing, e.g.:
18791 //
18792 // #define CALL(x)
18793 // if (condition)
18794 // CALL(0);
18795 if (Body->hasLeadingEmptyMacro())
18796 return false;
18797
18798 // Get line numbers of statement and body.
18799 bool StmtLineInvalid;
18800 unsigned StmtLine = SourceMgr.getPresumedLineNumber(Loc: StmtLoc,
18801 Invalid: &StmtLineInvalid);
18802 if (StmtLineInvalid)
18803 return false;
18804
18805 bool BodyLineInvalid;
18806 unsigned BodyLine = SourceMgr.getSpellingLineNumber(Loc: Body->getSemiLoc(),
18807 Invalid: &BodyLineInvalid);
18808 if (BodyLineInvalid)
18809 return false;
18810
18811 // Warn if null statement and body are on the same line.
18812 if (StmtLine != BodyLine)
18813 return false;
18814
18815 return true;
18816}
18817
18818void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
18819 const Stmt *Body,
18820 unsigned DiagID) {
18821 // Since this is a syntactic check, don't emit diagnostic for template
18822 // instantiations, this just adds noise.
18823 if (CurrentInstantiationScope)
18824 return;
18825
18826 // The body should be a null statement.
18827 const NullStmt *NBody = dyn_cast<NullStmt>(Val: Body);
18828 if (!NBody)
18829 return;
18830
18831 // Do the usual checks.
18832 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, Body: NBody))
18833 return;
18834
18835 Diag(Loc: NBody->getSemiLoc(), DiagID);
18836 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
18837}
18838
18839void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
18840 const Stmt *PossibleBody) {
18841 assert(!CurrentInstantiationScope); // Ensured by caller
18842
18843 SourceLocation StmtLoc;
18844 const Stmt *Body;
18845 unsigned DiagID;
18846 if (const ForStmt *FS = dyn_cast<ForStmt>(Val: S)) {
18847 StmtLoc = FS->getRParenLoc();
18848 Body = FS->getBody();
18849 DiagID = diag::warn_empty_for_body;
18850 } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Val: S)) {
18851 StmtLoc = WS->getRParenLoc();
18852 Body = WS->getBody();
18853 DiagID = diag::warn_empty_while_body;
18854 } else
18855 return; // Neither `for' nor `while'.
18856
18857 // The body should be a null statement.
18858 const NullStmt *NBody = dyn_cast<NullStmt>(Val: Body);
18859 if (!NBody)
18860 return;
18861
18862 // Skip expensive checks if diagnostic is disabled.
18863 if (Diags.isIgnored(DiagID, Loc: NBody->getSemiLoc()))
18864 return;
18865
18866 // Do the usual checks.
18867 if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, Body: NBody))
18868 return;
18869
18870 // `for(...);' and `while(...);' are popular idioms, so in order to keep
18871 // noise level low, emit diagnostics only if for/while is followed by a
18872 // CompoundStmt, e.g.:
18873 // for (int i = 0; i < n; i++);
18874 // {
18875 // a(i);
18876 // }
18877 // or if for/while is followed by a statement with more indentation
18878 // than for/while itself:
18879 // for (int i = 0; i < n; i++);
18880 // a(i);
18881 bool ProbableTypo = isa<CompoundStmt>(Val: PossibleBody);
18882 if (!ProbableTypo) {
18883 bool BodyColInvalid;
18884 unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
18885 Loc: PossibleBody->getBeginLoc(), Invalid: &BodyColInvalid);
18886 if (BodyColInvalid)
18887 return;
18888
18889 bool StmtColInvalid;
18890 unsigned StmtCol =
18891 SourceMgr.getPresumedColumnNumber(Loc: S->getBeginLoc(), Invalid: &StmtColInvalid);
18892 if (StmtColInvalid)
18893 return;
18894
18895 if (BodyCol > StmtCol)
18896 ProbableTypo = true;
18897 }
18898
18899 if (ProbableTypo) {
18900 Diag(Loc: NBody->getSemiLoc(), DiagID);
18901 Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
18902 }
18903}
18904
18905//===--- CHECK: Warn on self move with std::move. -------------------------===//
18906
18907/// DiagnoseSelfMove - Emits a warning if a value is moved to itself.
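///
/// For example:
/// \code
///   std::vector<int> v = makeVector(); // 'makeVector' is illustrative only
///   v = std::move(v);                  // warns: explicitly moving variable
///                                      // of type 'std::vector<int>' to itself
/// \endcode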
18908void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
18909 SourceLocation OpLoc) {
  if (Diags.isIgnored(diag::warn_self_move, OpLoc))
18911 return;
18912
18913 if (inTemplateInstantiation())
18914 return;
18915
18916 // Strip parens and casts away.
18917 LHSExpr = LHSExpr->IgnoreParenImpCasts();
18918 RHSExpr = RHSExpr->IgnoreParenImpCasts();
18919
18920 // Check for a call expression
18921 const CallExpr *CE = dyn_cast<CallExpr>(Val: RHSExpr);
18922 if (!CE || CE->getNumArgs() != 1)
18923 return;
18924
18925 // Check for a call to std::move
18926 if (!CE->isCallToStdMove())
18927 return;
18928
18929 // Get argument from std::move
18930 RHSExpr = CE->getArg(Arg: 0);
18931
18932 const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(Val: LHSExpr);
18933 const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(Val: RHSExpr);
18934
18935 // Two DeclRefExpr's, check that the decls are the same.
18936 if (LHSDeclRef && RHSDeclRef) {
18937 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
18938 return;
18939 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
18940 RHSDeclRef->getDecl()->getCanonicalDecl())
18941 return;
18942
18943 auto D = Diag(OpLoc, diag::warn_self_move)
18944 << LHSExpr->getType() << LHSExpr->getSourceRange()
18945 << RHSExpr->getSourceRange();
18946 if (const FieldDecl *F =
18947 getSelfAssignmentClassMemberCandidate(SelfAssigned: RHSDeclRef->getDecl()))
18948 D << 1 << F
18949 << FixItHint::CreateInsertion(InsertionLoc: LHSDeclRef->getBeginLoc(), Code: "this->");
18950 else
18951 D << 0;
18952 return;
18953 }
18954
  // Member variables require a different approach to check for self moves.
  // MemberExprs are the same if every nested MemberExpr refers to the same
  // Decl and the base Exprs are either DeclRefExprs with the same Decl or
  // CXXThisExprs.
18959 const Expr *LHSBase = LHSExpr;
18960 const Expr *RHSBase = RHSExpr;
18961 const MemberExpr *LHSME = dyn_cast<MemberExpr>(Val: LHSExpr);
18962 const MemberExpr *RHSME = dyn_cast<MemberExpr>(Val: RHSExpr);
18963 if (!LHSME || !RHSME)
18964 return;
18965
18966 while (LHSME && RHSME) {
18967 if (LHSME->getMemberDecl()->getCanonicalDecl() !=
18968 RHSME->getMemberDecl()->getCanonicalDecl())
18969 return;
18970
18971 LHSBase = LHSME->getBase();
18972 RHSBase = RHSME->getBase();
18973 LHSME = dyn_cast<MemberExpr>(Val: LHSBase);
18974 RHSME = dyn_cast<MemberExpr>(Val: RHSBase);
18975 }
18976
18977 LHSDeclRef = dyn_cast<DeclRefExpr>(Val: LHSBase);
18978 RHSDeclRef = dyn_cast<DeclRefExpr>(Val: RHSBase);
18979 if (LHSDeclRef && RHSDeclRef) {
18980 if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl())
18981 return;
18982 if (LHSDeclRef->getDecl()->getCanonicalDecl() !=
18983 RHSDeclRef->getDecl()->getCanonicalDecl())
18984 return;
18985
18986 Diag(OpLoc, diag::warn_self_move)
18987 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
18988 << RHSExpr->getSourceRange();
18989 return;
18990 }
18991
18992 if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase))
18993 Diag(OpLoc, diag::warn_self_move)
18994 << LHSExpr->getType() << 0 << LHSExpr->getSourceRange()
18995 << RHSExpr->getSourceRange();
18996}
18997
18998//===--- Layout compatibility ----------------------------------------------//
18999
19000static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2);
19001
19002/// Check if two enumeration types are layout-compatible.
19003static bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) {
19004 // C++11 [dcl.enum] p8:
19005 // Two enumeration types are layout-compatible if they have the same
19006 // underlying type.
19007 return ED1->isComplete() && ED2->isComplete() &&
19008 C.hasSameType(T1: ED1->getIntegerType(), T2: ED2->getIntegerType());
19009}
19010
19011/// Check if two fields are layout-compatible.
19012static bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1,
19013 FieldDecl *Field2) {
19014 if (!isLayoutCompatible(C, Field1->getType(), Field2->getType()))
19015 return false;
19016
19017 if (Field1->isBitField() != Field2->isBitField())
19018 return false;
19019
19020 if (Field1->isBitField()) {
19021 // Make sure that the bit-fields are the same length.
19022 unsigned Bits1 = Field1->getBitWidthValue(Ctx: C);
19023 unsigned Bits2 = Field2->getBitWidthValue(Ctx: C);
19024
19025 if (Bits1 != Bits2)
19026 return false;
19027 }
19028
19029 return true;
19030}
19031
19032/// Check if two standard-layout structs are layout-compatible.
19033/// (C++11 [class.mem] p17)
19034static bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1,
19035 RecordDecl *RD2) {
19036 // If both records are C++ classes, check that base classes match.
19037 if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(Val: RD1)) {
    // If one of the records is a CXXRecordDecl, we are in C++ mode, so the
    // other one must be a CXXRecordDecl, too.
19040 const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(Val: RD2);
19041 // Check number of base classes.
19042 if (D1CXX->getNumBases() != D2CXX->getNumBases())
19043 return false;
19044
19045 // Check the base classes.
19046 for (CXXRecordDecl::base_class_const_iterator
19047 Base1 = D1CXX->bases_begin(),
19048 BaseEnd1 = D1CXX->bases_end(),
19049 Base2 = D2CXX->bases_begin();
19050 Base1 != BaseEnd1;
19051 ++Base1, ++Base2) {
19052 if (!isLayoutCompatible(C, T1: Base1->getType(), T2: Base2->getType()))
19053 return false;
19054 }
19055 } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(Val: RD2)) {
19056 // If only RD2 is a C++ class, it should have zero base classes.
19057 if (D2CXX->getNumBases() > 0)
19058 return false;
19059 }
19060
19061 // Check the fields.
19062 RecordDecl::field_iterator Field2 = RD2->field_begin(),
19063 Field2End = RD2->field_end(),
19064 Field1 = RD1->field_begin(),
19065 Field1End = RD1->field_end();
19066 for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) {
19067 if (!isLayoutCompatible(C, Field1: *Field1, Field2: *Field2))
19068 return false;
19069 }
19070 if (Field1 != Field1End || Field2 != Field2End)
19071 return false;
19072
19073 return true;
19074}
19075
19076/// Check if two standard-layout unions are layout-compatible.
19077/// (C++11 [class.mem] p18)
19078static bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1,
19079 RecordDecl *RD2) {
19080 llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields;
19081 for (auto *Field2 : RD2->fields())
19082 UnmatchedFields.insert(Ptr: Field2);
19083
19084 for (auto *Field1 : RD1->fields()) {
19085 llvm::SmallPtrSet<FieldDecl *, 8>::iterator
19086 I = UnmatchedFields.begin(),
19087 E = UnmatchedFields.end();
19088
19089 for ( ; I != E; ++I) {
19090 if (isLayoutCompatible(C, Field1, Field2: *I)) {
19091 bool Result = UnmatchedFields.erase(Ptr: *I);
19092 (void) Result;
19093 assert(Result);
19094 break;
19095 }
19096 }
19097 if (I == E)
19098 return false;
19099 }
19100
19101 return UnmatchedFields.empty();
19102}
19103
19104static bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1,
19105 RecordDecl *RD2) {
19106 if (RD1->isUnion() != RD2->isUnion())
19107 return false;
19108
19109 if (RD1->isUnion())
19110 return isLayoutCompatibleUnion(C, RD1, RD2);
19111 else
19112 return isLayoutCompatibleStruct(C, RD1, RD2);
19113}
19114
19115/// Check if two types are layout-compatible in C++11 sense.
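///
/// For example, the following two standard-layout structs are
/// layout-compatible even though their member names differ:
/// \code
///   struct A { int x; char c; };
///   struct B { int y; char d; };
/// \endcode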
19116static bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) {
19117 if (T1.isNull() || T2.isNull())
19118 return false;
19119
19120 // C++11 [basic.types] p11:
19121 // If two types T1 and T2 are the same type, then T1 and T2 are
19122 // layout-compatible types.
19123 if (C.hasSameType(T1, T2))
19124 return true;
19125
19126 T1 = T1.getCanonicalType().getUnqualifiedType();
19127 T2 = T2.getCanonicalType().getUnqualifiedType();
19128
19129 const Type::TypeClass TC1 = T1->getTypeClass();
19130 const Type::TypeClass TC2 = T2->getTypeClass();
19131
19132 if (TC1 != TC2)
19133 return false;
19134
19135 if (TC1 == Type::Enum) {
19136 return isLayoutCompatible(C,
19137 ED1: cast<EnumType>(Val&: T1)->getDecl(),
19138 ED2: cast<EnumType>(Val&: T2)->getDecl());
19139 } else if (TC1 == Type::Record) {
19140 if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType())
19141 return false;
19142
19143 return isLayoutCompatible(C,
19144 RD1: cast<RecordType>(Val&: T1)->getDecl(),
19145 RD2: cast<RecordType>(Val&: T2)->getDecl());
19146 }
19147
19148 return false;
19149}
19150
19151//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----//
19152
/// Given a type tag expression, find the type tag itself.
19154///
19155/// \param TypeExpr Type tag expression, as it appears in user's code.
19156///
19157/// \param VD Declaration of an identifier that appears in a type tag.
19158///
19159/// \param MagicValue Type tag magic value.
19160///
/// \param isConstantEvaluated whether the evaluation should be performed in
/// constant context.
19164static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx,
19165 const ValueDecl **VD, uint64_t *MagicValue,
19166 bool isConstantEvaluated) {
19167 while(true) {
19168 if (!TypeExpr)
19169 return false;
19170
19171 TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts();
19172
19173 switch (TypeExpr->getStmtClass()) {
19174 case Stmt::UnaryOperatorClass: {
19175 const UnaryOperator *UO = cast<UnaryOperator>(Val: TypeExpr);
19176 if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) {
19177 TypeExpr = UO->getSubExpr();
19178 continue;
19179 }
19180 return false;
19181 }
19182
19183 case Stmt::DeclRefExprClass: {
19184 const DeclRefExpr *DRE = cast<DeclRefExpr>(Val: TypeExpr);
19185 *VD = DRE->getDecl();
19186 return true;
19187 }
19188
19189 case Stmt::IntegerLiteralClass: {
19190 const IntegerLiteral *IL = cast<IntegerLiteral>(Val: TypeExpr);
19191 llvm::APInt MagicValueAPInt = IL->getValue();
19192 if (MagicValueAPInt.getActiveBits() <= 64) {
19193 *MagicValue = MagicValueAPInt.getZExtValue();
19194 return true;
19195 } else
19196 return false;
19197 }
19198
19199 case Stmt::BinaryConditionalOperatorClass:
19200 case Stmt::ConditionalOperatorClass: {
19201 const AbstractConditionalOperator *ACO =
19202 cast<AbstractConditionalOperator>(Val: TypeExpr);
19203 bool Result;
19204 if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx,
19205 InConstantContext: isConstantEvaluated)) {
19206 if (Result)
19207 TypeExpr = ACO->getTrueExpr();
19208 else
19209 TypeExpr = ACO->getFalseExpr();
19210 continue;
19211 }
19212 return false;
19213 }
19214
19215 case Stmt::BinaryOperatorClass: {
19216 const BinaryOperator *BO = cast<BinaryOperator>(Val: TypeExpr);
19217 if (BO->getOpcode() == BO_Comma) {
19218 TypeExpr = BO->getRHS();
19219 continue;
19220 }
19221 return false;
19222 }
19223
19224 default:
19225 return false;
19226 }
19227 }
19228}
19229
19230/// Retrieve the C type corresponding to type tag TypeExpr.
19231///
19232/// \param TypeExpr Expression that specifies a type tag.
19233///
19234/// \param MagicValues Registered magic values.
19235///
19236/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong
19237/// kind.
19238///
19239/// \param TypeInfo Information about the corresponding C type.
19240///
/// \param isConstantEvaluated whether the evaluation should be performed in
19242/// constant context.
19243///
19244/// \returns true if the corresponding C type was found.
19245static bool GetMatchingCType(
19246 const IdentifierInfo *ArgumentKind, const Expr *TypeExpr,
19247 const ASTContext &Ctx,
19248 const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>
19249 *MagicValues,
19250 bool &FoundWrongKind, Sema::TypeTagData &TypeInfo,
19251 bool isConstantEvaluated) {
19252 FoundWrongKind = false;
19253
19254 // Variable declaration that has type_tag_for_datatype attribute.
19255 const ValueDecl *VD = nullptr;
19256
19257 uint64_t MagicValue;
19258
19259 if (!FindTypeTagExpr(TypeExpr, Ctx, VD: &VD, MagicValue: &MagicValue, isConstantEvaluated))
19260 return false;
19261
19262 if (VD) {
19263 if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) {
19264 if (I->getArgumentKind() != ArgumentKind) {
19265 FoundWrongKind = true;
19266 return false;
19267 }
19268 TypeInfo.Type = I->getMatchingCType();
19269 TypeInfo.LayoutCompatible = I->getLayoutCompatible();
19270 TypeInfo.MustBeNull = I->getMustBeNull();
19271 return true;
19272 }
19273 return false;
19274 }
19275
19276 if (!MagicValues)
19277 return false;
19278
19279 llvm::DenseMap<Sema::TypeTagMagicValue,
19280 Sema::TypeTagData>::const_iterator I =
19281 MagicValues->find(Val: std::make_pair(x&: ArgumentKind, y&: MagicValue));
19282 if (I == MagicValues->end())
19283 return false;
19284
19285 TypeInfo = I->second;
19286 return true;
19287}
19288
19289void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
19290 uint64_t MagicValue, QualType Type,
19291 bool LayoutCompatible,
19292 bool MustBeNull) {
19293 if (!TypeTagForDatatypeMagicValues)
19294 TypeTagForDatatypeMagicValues.reset(
19295 p: new llvm::DenseMap<TypeTagMagicValue, TypeTagData>);
19296
19297 TypeTagMagicValue Magic(ArgumentKind, MagicValue);
19298 (*TypeTagForDatatypeMagicValues)[Magic] =
19299 TypeTagData(Type, LayoutCompatible, MustBeNull);
19300}
19301
19302static bool IsSameCharType(QualType T1, QualType T2) {
19303 const BuiltinType *BT1 = T1->getAs<BuiltinType>();
19304 if (!BT1)
19305 return false;
19306
19307 const BuiltinType *BT2 = T2->getAs<BuiltinType>();
19308 if (!BT2)
19309 return false;
19310
19311 BuiltinType::Kind T1Kind = BT1->getKind();
19312 BuiltinType::Kind T2Kind = BT2->getKind();
19313
19314 return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) ||
19315 (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) ||
19316 (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) ||
19317 (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar);
19318}
19319
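/// Check a call to a function declared with the argument_with_type_tag (or
/// pointer_with_type_tag) attribute: the argument at the attribute's argument
/// index must have the C type that was registered for the type tag passed at
/// the type-tag index.
///
/// A simplified sketch of the MPI-style usage this supports (the declarations
/// below are illustrative, not part of Clang):
/// \code
///   extern struct mpi_datatype mpi_datatype_int
///       __attribute__((type_tag_for_datatype(mpi, int)));
///   #define MPI_INT ((MPI_Datatype)&mpi_datatype_int)
///   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
///       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
///
///   float f;
///   MPI_Send(&f, 1, MPI_INT); // warns: argument type 'float *' doesn't match
///                             // specified 'mpi' type tag that requires 'int *'
/// \endcode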
19320void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
19321 const ArrayRef<const Expr *> ExprArgs,
19322 SourceLocation CallSiteLoc) {
19323 const IdentifierInfo *ArgumentKind = Attr->getArgumentKind();
19324 bool IsPointerAttr = Attr->getIsPointer();
19325
19326 // Retrieve the argument representing the 'type_tag'.
19327 unsigned TypeTagIdxAST = Attr->getTypeTagIdx().getASTIndex();
19328 if (TypeTagIdxAST >= ExprArgs.size()) {
19329 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
19330 << 0 << Attr->getTypeTagIdx().getSourceIndex();
19331 return;
19332 }
19333 const Expr *TypeTagExpr = ExprArgs[TypeTagIdxAST];
19334 bool FoundWrongKind;
19335 TypeTagData TypeInfo;
19336 if (!GetMatchingCType(ArgumentKind, TypeExpr: TypeTagExpr, Ctx: Context,
19337 MagicValues: TypeTagForDatatypeMagicValues.get(), FoundWrongKind,
19338 TypeInfo, isConstantEvaluated: isConstantEvaluatedContext())) {
19339 if (FoundWrongKind)
19340 Diag(TypeTagExpr->getExprLoc(),
19341 diag::warn_type_tag_for_datatype_wrong_kind)
19342 << TypeTagExpr->getSourceRange();
19343 return;
19344 }
19345
19346 // Retrieve the argument representing the 'arg_idx'.
19347 unsigned ArgumentIdxAST = Attr->getArgumentIdx().getASTIndex();
19348 if (ArgumentIdxAST >= ExprArgs.size()) {
19349 Diag(CallSiteLoc, diag::err_tag_index_out_of_range)
19350 << 1 << Attr->getArgumentIdx().getSourceIndex();
19351 return;
19352 }
19353 const Expr *ArgumentExpr = ExprArgs[ArgumentIdxAST];
19354 if (IsPointerAttr) {
19355 // Skip implicit cast of pointer to `void *' (as a function argument).
19356 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Val: ArgumentExpr))
19357 if (ICE->getType()->isVoidPointerType() &&
19358 ICE->getCastKind() == CK_BitCast)
19359 ArgumentExpr = ICE->getSubExpr();
19360 }
19361 QualType ArgumentType = ArgumentExpr->getType();
19362
19363 // Passing a `void*' pointer shouldn't trigger a warning.
19364 if (IsPointerAttr && ArgumentType->isVoidPointerType())
19365 return;
19366
19367 if (TypeInfo.MustBeNull) {
19368 // Type tag with matching void type requires a null pointer.
19369 if (!ArgumentExpr->isNullPointerConstant(Ctx&: Context,
19370 NPC: Expr::NPC_ValueDependentIsNotNull)) {
19371 Diag(ArgumentExpr->getExprLoc(),
19372 diag::warn_type_safety_null_pointer_required)
19373 << ArgumentKind->getName()
19374 << ArgumentExpr->getSourceRange()
19375 << TypeTagExpr->getSourceRange();
19376 }
19377 return;
19378 }
19379
19380 QualType RequiredType = TypeInfo.Type;
19381 if (IsPointerAttr)
19382 RequiredType = Context.getPointerType(T: RequiredType);
19383
19384 bool mismatch = false;
19385 if (!TypeInfo.LayoutCompatible) {
19386 mismatch = !Context.hasSameType(T1: ArgumentType, T2: RequiredType);
19387
19388 // C++11 [basic.fundamental] p1:
19389 // Plain char, signed char, and unsigned char are three distinct types.
19390 //
19391 // But we treat plain `char' as equivalent to `signed char' or `unsigned
19392 // char' depending on the current char signedness mode.
19393 if (mismatch)
19394 if ((IsPointerAttr && IsSameCharType(T1: ArgumentType->getPointeeType(),
19395 T2: RequiredType->getPointeeType())) ||
19396 (!IsPointerAttr && IsSameCharType(T1: ArgumentType, T2: RequiredType)))
19397 mismatch = false;
19398 } else
19399 if (IsPointerAttr)
19400 mismatch = !isLayoutCompatible(C&: Context,
19401 T1: ArgumentType->getPointeeType(),
19402 T2: RequiredType->getPointeeType());
19403 else
19404 mismatch = !isLayoutCompatible(C&: Context, T1: ArgumentType, T2: RequiredType);
19405
19406 if (mismatch)
19407 Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch)
19408 << ArgumentType << ArgumentKind
19409 << TypeInfo.LayoutCompatible << RequiredType
19410 << ArgumentExpr->getSourceRange()
19411 << TypeTagExpr->getSourceRange();
19412}
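
// Illustrative usage of the type-safety attributes serviced by the check
// above (a hedged sketch based on Clang's documented type_tag_for_datatype /
// pointer_with_type_tag attributes; the MPI names below are placeholders, not
// part of this file):
//
//   typedef int MPI_Datatype;
//   static const MPI_Datatype mpi_int
//       __attribute__((type_tag_for_datatype(mpi, int))) = 1;
//   int MPI_Send(void *buf, int count, MPI_Datatype datatype)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
//
//   int i; double d;
//   MPI_Send(&i, 1, mpi_int);   // OK: 'buf' really points to an int.
//   MPI_Send(&d, 1, mpi_int);   // -Wtype-safety mismatch diagnosed by
//                               // CheckArgumentWithTypeTag.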

void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                         CharUnits Alignment) {
  MisalignedMembers.emplace_back(E, RD, MD, Alignment);
}

void Sema::DiagnoseMisalignedMembers() {
  for (MisalignedMember &m : MisalignedMembers) {
    const NamedDecl *ND = m.RD;
    if (ND->getName().empty()) {
      if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
        ND = TD;
    }
    Diag(m.E->getBeginLoc(), diag::warn_taking_address_of_packed_member)
        << m.MD << ND << m.E->getSourceRange();
  }
  MisalignedMembers.clear();
}

void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
  E = E->IgnoreParens();
  if (!T->isPointerType() && !T->isIntegerType() && !T->isDependentType())
    return;
  if (isa<UnaryOperator>(E) &&
      cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
    auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    if (isa<MemberExpr>(Op)) {
      auto *MA = llvm::find(MisalignedMembers, MisalignedMember(Op));
      if (MA != MisalignedMembers.end() &&
          (T->isDependentType() || T->isIntegerType() ||
           (T->isPointerType() && (T->getPointeeType()->isIncompleteType() ||
                                   Context.getTypeAlignInChars(
                                       T->getPointeeType()) <= MA->Alignment))))
        MisalignedMembers.erase(MA);
    }
  }
}

void Sema::RefersToMemberWithReducedAlignment(
    Expr *E,
    llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
        Action) {
  const auto *ME = dyn_cast<MemberExpr>(E);
  if (!ME)
    return;

  // No need to check expressions with an __unaligned-qualified type.
  if (E->getType().getQualifiers().hasUnaligned())
    return;

  // For a chain of MemberExpr like "a.b.c.d" this list
  // will keep FieldDecl's like [d, c, b].
  SmallVector<FieldDecl *, 4> ReverseMemberChain;
  const MemberExpr *TopME = nullptr;
  bool AnyIsPacked = false;
  do {
    QualType BaseType = ME->getBase()->getType();
    if (BaseType->isDependentType())
      return;
    if (ME->isArrow())
      BaseType = BaseType->getPointeeType();
    RecordDecl *RD = BaseType->castAs<RecordType>()->getDecl();
    if (RD->isInvalidDecl())
      return;

    ValueDecl *MD = ME->getMemberDecl();
    auto *FD = dyn_cast<FieldDecl>(MD);
    // We do not care about non-data members.
    if (!FD || FD->isInvalidDecl())
      return;

    AnyIsPacked =
        AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
    ReverseMemberChain.push_back(FD);

    TopME = ME;
    ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
  } while (ME);
  assert(TopME && "We did not compute a topmost MemberExpr!");

  // Not the scope of this diagnostic.
  if (!AnyIsPacked)
    return;

  const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
  const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
  // TODO: The innermost base of the member expression may be too complicated.
  // For now, just disregard these cases. This is left for future
  // improvement.
  if (!DRE && !isa<CXXThisExpr>(TopBase))
    return;

  // Alignment expected by the whole expression.
  CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());

  // No need to do anything else with this case.
  if (ExpectedAlignment.isOne())
    return;

  // Synthesize offset of the whole access.
  CharUnits Offset;
  for (const FieldDecl *FD : llvm::reverse(ReverseMemberChain))
    Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(FD));

  // Compute the CompleteObjectAlignment as the alignment of the whole chain.
  CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
      ReverseMemberChain.back()->getParent()->getTypeForDecl());

  // The base expression of the innermost MemberExpr may give
  // stronger guarantees than the class containing the member.
  if (DRE && !TopME->isArrow()) {
    const ValueDecl *VD = DRE->getDecl();
    if (!VD->getType()->isReferenceType())
      CompleteObjectAlignment =
          std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
  }

  // Check if the synthesized offset fulfills the alignment.
  if (Offset % ExpectedAlignment != 0 ||
      // The offset may be a multiple of the expected alignment, but the
      // effective alignment of the complete object may still be lower than
      // the expected expression alignment.
      CompleteObjectAlignment < ExpectedAlignment) {
    // If this happens, we want to determine a sensible culprit of this.
    // Intuitively, watching the chain of member expressions from right to
    // left, we start with the required alignment (as required by the field
    // type) but some packed attribute in that chain has reduced the alignment.
    // It may happen that another packed structure increases it again. But if
    // we are here such increase has not been enough. So pointing at the first
    // FieldDecl that is either packed itself or whose enclosing RecordDecl is
    // packed seems reasonable.
    FieldDecl *FD = nullptr;
    CharUnits Alignment;
    for (FieldDecl *FDI : ReverseMemberChain) {
      if (FDI->hasAttr<PackedAttr>() ||
          FDI->getParent()->hasAttr<PackedAttr>()) {
        FD = FDI;
        Alignment = std::min(
            Context.getTypeAlignInChars(FD->getType()),
            Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
        break;
      }
    }
    assert(FD && "We did not find a packed FieldDecl!");
    Action(E, FD->getParent(), FD, Alignment);
  }
}

void Sema::CheckAddressOfPackedMember(Expr *rhs) {
  using namespace std::placeholders;

  RefersToMemberWithReducedAlignment(
      rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
                     _2, _3, _4));
}
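
// Illustrative trigger for the diagnostic produced above (a sketch, not part
// of the original file): taking the address of a member of a packed record
// may yield an under-aligned pointer, which -Waddress-of-packed-member
// reports.
//
//   struct __attribute__((packed)) S { char c; int i; };
//   int *f(struct S *s) { return &s->i; }  // warning: taking address of
//                                          // packed member 'i' may result in
//                                          // an unaligned pointer value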

bool Sema::PrepareBuiltinElementwiseMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  QualType TyA = A.get()->getType();

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setType(TyA);
  return false;
}
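
// Illustrative use of a unary elementwise builtin that goes through the
// preparation above (a sketch, not part of the original file): the call's
// type is the promoted argument type, element for element.
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 f(float4 v) { return __builtin_elementwise_abs(v); }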

bool Sema::SemaBuiltinElementwiseMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  ExprResult A = TheCall->getArg(0);
  ExprResult B = TheCall->getArg(1);
  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res =
      UsualArithmeticConversions(A, B, TheCall->getExprLoc(), ACK_Comparison);
  if (A.isInvalid() || B.isInvalid())
    return true;

  QualType TyA = A.get()->getType();
  QualType TyB = B.get()->getType();

  if (Res.isNull() || TyA.getCanonicalType() != TyB.getCanonicalType())
    return Diag(A.get()->getBeginLoc(),
                diag::err_typecheck_call_different_arg_types)
           << TyA << TyB;

  if (checkMathBuiltinElementType(*this, A.get()->getBeginLoc(), TyA))
    return true;

  TheCall->setArg(0, A.get());
  TheCall->setArg(1, B.get());
  TheCall->setType(Res);
  return false;
}
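
// Illustrative use of a binary elementwise builtin (a sketch, not part of the
// original file): both operands must promote to the same type, which becomes
// the type of the call.
//
//   typedef int int4 __attribute__((ext_vector_type(4)));
//   int4 f(int4 a, int4 b) { return __builtin_elementwise_max(a, b); }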

bool Sema::SemaBuiltinElementwiseTernaryMath(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  Expr *Args[3];
  for (int I = 0; I < 3; ++I) {
    ExprResult Converted = UsualUnaryConversions(TheCall->getArg(I));
    if (Converted.isInvalid())
      return true;
    Args[I] = Converted.get();
  }

  int ArgOrdinal = 1;
  for (Expr *Arg : Args) {
    if (checkFPMathBuiltinElementType(*this, Arg->getBeginLoc(), Arg->getType(),
                                      ArgOrdinal++))
      return true;
  }

  for (int I = 1; I < 3; ++I) {
    if (Args[0]->getType().getCanonicalType() !=
        Args[I]->getType().getCanonicalType()) {
      return Diag(Args[0]->getBeginLoc(),
                  diag::err_typecheck_call_different_arg_types)
             << Args[0]->getType() << Args[I]->getType();
    }

    TheCall->setArg(I, Args[I]);
  }

  TheCall->setType(Args[0]->getType());
  return false;
}
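
// Illustrative use of a ternary elementwise builtin (a sketch, not part of
// the original file): all three operands must be floating-point scalars or
// vectors of the same type.
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 f(float4 a, float4 b, float4 c) {
//     return __builtin_elementwise_fma(a, b, c);
//   }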

bool Sema::PrepareBuiltinReduceMathOneArgCall(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult A = UsualUnaryConversions(TheCall->getArg(0));
  if (A.isInvalid())
    return true;

  TheCall->setArg(0, A.get());
  return false;
}
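
// Illustrative use of a reduction builtin that only needs the unary
// preparation above (a sketch, not part of the original file): the vector is
// reduced to a scalar of its element type.
//
//   typedef int int4 __attribute__((ext_vector_type(4)));
//   int f(int4 v) { return __builtin_reduce_add(v); }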

bool Sema::SemaBuiltinNonDeterministicValue(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  ExprResult Arg = TheCall->getArg(0);
  QualType TyArg = Arg.get()->getType();

  if (!TyArg->isBuiltinType() && !TyArg->isVectorType())
    return Diag(TheCall->getArg(0)->getBeginLoc(),
                diag::err_builtin_invalid_arg_type)
           << 1 << /*vector, integer or floating point ty*/ 0 << TyArg;

  TheCall->setType(TyArg);
  return false;
}
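
// Illustrative use (a sketch, not part of the original file): the builtin
// accepts an integer, floating-point, or vector operand and the call takes
// that same type.
//
//   float f(float x) { return __builtin_nondeterministic_value(x); }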

ExprResult Sema::SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                            ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 1))
    return ExprError();

  ExprResult MatrixArg = DefaultLvalueConversion(TheCall->getArg(0));
  if (MatrixArg.isInvalid())
    return MatrixArg;
  Expr *Matrix = MatrixArg.get();

  auto *MType = Matrix->getType()->getAs<ConstantMatrixType>();
  if (!MType) {
    Diag(Matrix->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /* matrix ty*/ 1 << Matrix->getType();
    return ExprError();
  }

  // Create returned matrix type by swapping rows and columns of the argument
  // matrix type.
  QualType ResultType = Context.getConstantMatrixType(
      MType->getElementType(), MType->getNumColumns(), MType->getNumRows());

  // Change the return type to the type of the returned matrix.
  TheCall->setType(ResultType);

  // Update call argument to use the possibly converted matrix argument.
  TheCall->setArg(0, Matrix);
  return CallResult;
}
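
// Illustrative use (a sketch, not part of the original file; requires
// -fenable-matrix): the result type swaps the row and column counts of the
// argument's matrix type.
//
//   typedef float m4x3_t __attribute__((matrix_type(4, 3)));
//   typedef float m3x4_t __attribute__((matrix_type(3, 4)));
//   m3x4_t f(m4x3_t M) { return __builtin_matrix_transpose(M); }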

// Get and verify the matrix dimensions.
static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
  SourceLocation ErrorPos;
  std::optional<llvm::APSInt> Value =
      Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
  if (!Value) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
        << Name;
    return {};
  }
  uint64_t Dim = Value->getZExtValue();
  if (!ConstantMatrixType::isDimensionValid(Dim)) {
    S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_invalid_dimension)
        << Name << ConstantMatrixType::getMaxElementsPerDimension();
    return {};
  }
  return Dim;
}

ExprResult Sema::SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                                  ExprResult CallResult) {
  if (!getLangOpts().MatrixTypes) {
    Diag(TheCall->getBeginLoc(), diag::err_builtin_matrix_disabled);
    return ExprError();
  }

  if (checkArgCount(*this, TheCall, 4))
    return ExprError();

  unsigned PtrArgIdx = 0;
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *RowsExpr = TheCall->getArg(1);
  Expr *ColumnsExpr = TheCall->getArg(2);
  Expr *StrideExpr = TheCall->getArg(3);

  bool ArgError = false;

  // Check pointer argument.
  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(0, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  QualType ElementTy;
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    ElementTy = PtrTy->getPointeeType().getUnqualifiedType();

    if (!ConstantMatrixType::isValidElementType(ElementTy)) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
          << PtrArgIdx + 1 << /* pointer to element ty*/ 2
          << PtrExpr->getType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the expression to size_t.
  auto ApplyArgumentConversions = [this](Expr *E) {
    ExprResult Conv = DefaultLvalueConversion(E);
    if (Conv.isInvalid())
      return Conv;

    return tryConvertExprToType(Conv.get(), Context.getSizeType());
  };

  // Apply conversion to row and column expressions.
  ExprResult RowsConv = ApplyArgumentConversions(RowsExpr);
  if (!RowsConv.isInvalid()) {
    RowsExpr = RowsConv.get();
    TheCall->setArg(1, RowsExpr);
  } else
    RowsExpr = nullptr;

  ExprResult ColumnsConv = ApplyArgumentConversions(ColumnsExpr);
  if (!ColumnsConv.isInvalid()) {
    ColumnsExpr = ColumnsConv.get();
    TheCall->setArg(2, ColumnsExpr);
  } else
    ColumnsExpr = nullptr;

  // If any part of the result matrix type is still pending, just use
  // Context.DependentTy, until all parts are resolved.
  if ((RowsExpr && RowsExpr->isTypeDependent()) ||
      (ColumnsExpr && ColumnsExpr->isTypeDependent())) {
    TheCall->setType(Context.DependentTy);
    return CallResult;
  }

  // Check row and column dimensions.
  std::optional<unsigned> MaybeRows;
  if (RowsExpr)
    MaybeRows = getAndVerifyMatrixDimension(RowsExpr, "row", *this);

  std::optional<unsigned> MaybeColumns;
  if (ColumnsExpr)
    MaybeColumns = getAndVerifyMatrixDimension(ColumnsExpr, "column", *this);

  // Check stride argument.
  ExprResult StrideConv = ApplyArgumentConversions(StrideExpr);
  if (StrideConv.isInvalid())
    return ExprError();
  StrideExpr = StrideConv.get();
  TheCall->setArg(3, StrideExpr);

  if (MaybeRows) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < *MaybeRows) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError || !MaybeRows || !MaybeColumns)
    return ExprError();

  TheCall->setType(
      Context.getConstantMatrixType(ElementTy, *MaybeRows, *MaybeColumns));
  return CallResult;
}
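
// Illustrative use (a sketch, not part of the original file; requires
// -fenable-matrix): the row and column arguments must be constants that form
// a valid matrix type, and the stride must be at least the number of rows.
//
//   typedef double m2x3_t __attribute__((matrix_type(2, 3)));
//   m2x3_t load(double *p) {
//     return __builtin_matrix_column_major_load(p, 2, 3, /*Stride=*/2);
//   }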

ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                                   ExprResult CallResult) {
  if (checkArgCount(*this, TheCall, 3))
    return ExprError();

  unsigned PtrArgIdx = 1;
  Expr *MatrixExpr = TheCall->getArg(0);
  Expr *PtrExpr = TheCall->getArg(PtrArgIdx);
  Expr *StrideExpr = TheCall->getArg(2);

  bool ArgError = false;

  {
    ExprResult MatrixConv = DefaultLvalueConversion(MatrixExpr);
    if (MatrixConv.isInvalid())
      return MatrixConv;
    MatrixExpr = MatrixConv.get();
    TheCall->setArg(0, MatrixExpr);
  }
  if (MatrixExpr->isTypeDependent()) {
    TheCall->setType(Context.DependentTy);
    return TheCall;
  }

  auto *MatrixTy = MatrixExpr->getType()->getAs<ConstantMatrixType>();
  if (!MatrixTy) {
    Diag(MatrixExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << 1 << /*matrix ty */ 1 << MatrixExpr->getType();
    ArgError = true;
  }

  {
    ExprResult PtrConv = DefaultFunctionArrayLvalueConversion(PtrExpr);
    if (PtrConv.isInvalid())
      return PtrConv;
    PtrExpr = PtrConv.get();
    TheCall->setArg(1, PtrExpr);
    if (PtrExpr->isTypeDependent()) {
      TheCall->setType(Context.DependentTy);
      return TheCall;
    }
  }

  // Check pointer argument.
  auto *PtrTy = PtrExpr->getType()->getAs<PointerType>();
  if (!PtrTy) {
    Diag(PtrExpr->getBeginLoc(), diag::err_builtin_invalid_arg_type)
        << PtrArgIdx + 1 << /*pointer to element ty*/ 2 << PtrExpr->getType();
    ArgError = true;
  } else {
    QualType ElementTy = PtrTy->getPointeeType();
    if (ElementTy.isConstQualified()) {
      Diag(PtrExpr->getBeginLoc(), diag::err_builtin_matrix_store_to_const);
      ArgError = true;
    }
    ElementTy = ElementTy.getUnqualifiedType().getCanonicalType();
    if (MatrixTy &&
        !Context.hasSameType(ElementTy, MatrixTy->getElementType())) {
      Diag(PtrExpr->getBeginLoc(),
           diag::err_builtin_matrix_pointer_arg_mismatch)
          << ElementTy << MatrixTy->getElementType();
      ArgError = true;
    }
  }

  // Apply default Lvalue conversions and convert the stride expression to
  // size_t.
  {
    ExprResult StrideConv = DefaultLvalueConversion(StrideExpr);
    if (StrideConv.isInvalid())
      return StrideConv;

    StrideConv = tryConvertExprToType(StrideConv.get(), Context.getSizeType());
    if (StrideConv.isInvalid())
      return StrideConv;
    StrideExpr = StrideConv.get();
    TheCall->setArg(2, StrideExpr);
  }

  // Check stride argument.
  if (MatrixTy) {
    if (std::optional<llvm::APSInt> Value =
            StrideExpr->getIntegerConstantExpr(Context)) {
      uint64_t Stride = Value->getZExtValue();
      if (Stride < MatrixTy->getNumRows()) {
        Diag(StrideExpr->getBeginLoc(),
             diag::err_builtin_matrix_stride_too_small);
        ArgError = true;
      }
    }
  }

  if (ArgError)
    return ExprError();

  return CallResult;
}
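
// Illustrative use (a sketch, not part of the original file; requires
// -fenable-matrix): the pointee type must match the matrix element type and
// must not be const, and the stride must be at least the number of rows.
//
//   typedef double m2x3_t __attribute__((matrix_type(2, 3)));
//   void store(m2x3_t M, double *p) {
//     __builtin_matrix_column_major_store(M, p, /*Stride=*/2);
//   }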

/// Checks the argument at the given index is a WebAssembly table and if it
/// is, sets ElTy to the element type.
static bool CheckWasmBuiltinArgIsTable(Sema &S, CallExpr *E, unsigned ArgIndex,
                                       QualType &ElTy) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  const auto *ATy = dyn_cast<ArrayType>(ArgExpr->getType());
  if (!ATy || !ATy->getElementType().isWebAssemblyReferenceType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_table_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  ElTy = ATy->getElementType();
  return false;
}

/// Checks the argument at the given index is an integer.
static bool CheckWasmBuiltinArgIsInteger(Sema &S, CallExpr *E,
                                         unsigned ArgIndex) {
  Expr *ArgExpr = E->getArg(ArgIndex);
  if (!ArgExpr->getType()->isIntegerType()) {
    return S.Diag(ArgExpr->getBeginLoc(),
                  diag::err_wasm_builtin_arg_must_be_integer_type)
           << ArgIndex + 1 << ArgExpr->getSourceRange();
  }
  return false;
}

/// Check that the first argument is a WebAssembly table, and the second
/// is an index to use as index into the table.
bool Sema::BuiltinWasmTableGet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 2))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  // If all is well, we set the type of TheCall to be the type of the
  // element of the table.
  // i.e. a table.get on an externref table has type externref,
  // or whatever the type of the table element is.
  TheCall->setType(ElTy);

  return false;
}
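
// Illustrative use of the table builtins checked here and below (a hedged
// sketch, not part of the original file; assumes a WebAssembly target with
// reference types enabled): a table is modelled as a zero-length array of a
// reference type, and table.get yields an element of that type.
//
//   static __externref_t table[0];
//   __externref_t get(int i) { return __builtin_wasm_table_get(table, i); }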

/// Check that the first argument is a WebAssembly table, the second is
/// an index to use as index into the table and the third is the reference
/// type to set into the table.
bool Sema::BuiltinWasmTableSet(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  if (!Context.hasSameType(ElTy, TheCall->getArg(2)->getType()))
    return true;

  return false;
}

/// Check that the argument is a WebAssembly table.
bool Sema::BuiltinWasmTableSize(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 1))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  return false;
}

/// Check that the first argument is a WebAssembly table, the second is the
/// value to use for new elements (of a type matching the table type), the
/// third value is an integer.
bool Sema::BuiltinWasmTableGrow(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 3))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  Expr *NewElemArg = TheCall->getArg(1);
  if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
    return Diag(NewElemArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 2 << 1 << NewElemArg->getSourceRange();
  }

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 2))
    return true;

  return false;
}

/// Check that the first argument is a WebAssembly table, the second is an
/// integer, the third is the value to use to fill the table (of a type
/// matching the table type), and the fourth is an integer.
bool Sema::BuiltinWasmTableFill(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 4))
    return true;

  QualType ElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, ElTy))
    return true;

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 1))
    return true;

  Expr *NewElemArg = TheCall->getArg(2);
  if (!Context.hasSameType(ElTy, NewElemArg->getType())) {
    return Diag(NewElemArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 3 << 1 << NewElemArg->getSourceRange();
  }

  if (CheckWasmBuiltinArgIsInteger(*this, TheCall, 3))
    return true;

  return false;
}

/// Check that the first argument is a WebAssembly table, the second is also a
/// WebAssembly table (of the same element type), and the third to fifth
/// arguments are integers.
bool Sema::BuiltinWasmTableCopy(CallExpr *TheCall) {
  if (checkArgCount(*this, TheCall, 5))
    return true;

  QualType XElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 0, XElTy))
    return true;

  QualType YElTy;
  if (CheckWasmBuiltinArgIsTable(*this, TheCall, 1, YElTy))
    return true;

  Expr *TableYArg = TheCall->getArg(1);
  if (!Context.hasSameType(XElTy, YElTy)) {
    return Diag(TableYArg->getBeginLoc(),
                diag::err_wasm_builtin_arg_must_match_table_element_type)
           << 2 << 1 << TableYArg->getSourceRange();
  }

  for (int I = 2; I <= 4; I++) {
    if (CheckWasmBuiltinArgIsInteger(*this, TheCall, I))
      return true;
  }

  return false;
}

/// \brief Enforce the bounds of a TCB
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
                               const NamedDecl *Callee) {
  // This warning does not make sense in code that has no runtime behavior.
  if (isUnevaluatedContext())
    return;

  const NamedDecl *Caller = getCurFunctionOrMethodDecl();

  if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
    return;

  // Search through the enforce_tcb and enforce_tcb_leaf attributes to find
  // all TCBs the callee is a part of.
  llvm::StringSet<> CalleeTCBs;
  for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
    CalleeTCBs.insert(A->getTCBName());
  for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
    CalleeTCBs.insert(A->getTCBName());

  // Go through the TCBs the caller is a part of and emit warnings if Caller
  // is in a TCB that the Callee is not.
  for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
    StringRef CallerTCB = A->getTCBName();
    if (CalleeTCBs.count(CallerTCB) == 0) {
      this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
          << Callee << CallerTCB;
    }
  }
}
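
// Illustrative use of the enforce_tcb attributes checked above (a sketch, not
// part of the original file): a function in a named TCB may only call other
// functions in that TCB, or functions marked as leaves of it, without a
// warning.
//
//   void not_in_tcb(void);
//   __attribute__((enforce_tcb_leaf("crypto"))) void leaf(void);
//   __attribute__((enforce_tcb("crypto"))) void in_tcb(void) {
//     leaf();        // OK: 'leaf' is a leaf of the "crypto" TCB.
//     not_in_tcb();  // warning: calling 'not_in_tcb' is a violation of
//                    // trusted computing base 'crypto'
//   }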