1//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the actions class which performs semantic analysis and
10// builds an AST out of a parse stream.
11//
12//===----------------------------------------------------------------------===//
13
14#include "UsedDeclVisitor.h"
15#include "clang/AST/ASTContext.h"
16#include "clang/AST/ASTDiagnostic.h"
17#include "clang/AST/Decl.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclFriend.h"
20#include "clang/AST/DeclObjC.h"
21#include "clang/AST/Expr.h"
22#include "clang/AST/ExprCXX.h"
23#include "clang/AST/PrettyDeclStackTrace.h"
24#include "clang/AST/StmtCXX.h"
25#include "clang/Basic/DarwinSDKInfo.h"
26#include "clang/Basic/DiagnosticOptions.h"
27#include "clang/Basic/PartialDiagnostic.h"
28#include "clang/Basic/SourceManager.h"
29#include "clang/Basic/Stack.h"
30#include "clang/Basic/TargetInfo.h"
31#include "clang/Lex/HeaderSearch.h"
32#include "clang/Lex/HeaderSearchOptions.h"
33#include "clang/Lex/Preprocessor.h"
34#include "clang/Sema/CXXFieldCollector.h"
35#include "clang/Sema/DelayedDiagnostic.h"
36#include "clang/Sema/EnterExpressionEvaluationContext.h"
37#include "clang/Sema/ExternalSemaSource.h"
38#include "clang/Sema/Initialization.h"
39#include "clang/Sema/MultiplexExternalSemaSource.h"
40#include "clang/Sema/ObjCMethodList.h"
41#include "clang/Sema/RISCVIntrinsicManager.h"
42#include "clang/Sema/Scope.h"
43#include "clang/Sema/ScopeInfo.h"
44#include "clang/Sema/SemaCUDA.h"
45#include "clang/Sema/SemaConsumer.h"
46#include "clang/Sema/SemaHLSL.h"
47#include "clang/Sema/SemaInternal.h"
48#include "clang/Sema/SemaOpenACC.h"
49#include "clang/Sema/SemaOpenMP.h"
50#include "clang/Sema/SemaSYCL.h"
51#include "clang/Sema/TemplateDeduction.h"
52#include "clang/Sema/TemplateInstCallback.h"
53#include "clang/Sema/TypoCorrection.h"
54#include "llvm/ADT/DenseMap.h"
55#include "llvm/ADT/STLExtras.h"
56#include "llvm/ADT/SmallPtrSet.h"
57#include "llvm/Support/TimeProfiler.h"
58#include <optional>
59
60using namespace clang;
61using namespace sema;
62
63SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
64 return Lexer::getLocForEndOfToken(Loc, Offset, SM: SourceMgr, LangOpts);
65}
66
67ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
68
69DarwinSDKInfo *
70Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
71 StringRef Platform) {
72 auto *SDKInfo = getDarwinSDKInfoForAvailabilityChecking();
73 if (!SDKInfo && !WarnedDarwinSDKInfoMissing) {
74 Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
75 << Platform;
76 WarnedDarwinSDKInfoMissing = true;
77 }
78 return SDKInfo;
79}
80
/// Parse (and cache) the Darwin SDKSettings for availability checking.
/// Returns null if the SDK info is unavailable; both success and failure are
/// cached so the parse is attempted at most once per Sema.
DarwinSDKInfo *Sema::getDarwinSDKInfoForAvailabilityChecking() {
  // An engaged cache entry may hold either a real DarwinSDKInfo or a null
  // unique_ptr recording an earlier parse failure.
  if (CachedDarwinSDKInfo)
    return CachedDarwinSDKInfo->get();
  auto SDKInfo = parseDarwinSDKInfo(
      VFS&: PP.getFileManager().getVirtualFileSystem(),
      SDKRootPath: PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
  if (SDKInfo && *SDKInfo) {
    CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(args: std::move(**SDKInfo));
    return CachedDarwinSDKInfo->get();
  }
  // Discard the llvm::Error so it is not reported as unhandled; the caller
  // overload is responsible for any user-facing diagnostic.
  if (!SDKInfo)
    llvm::consumeError(Err: SDKInfo.takeError());
  // Negative-cache the failure: engaged optional holding a null pointer.
  CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
  return nullptr;
}
96
97IdentifierInfo *Sema::InventAbbreviatedTemplateParameterTypeName(
98 const IdentifierInfo *ParamName, unsigned int Index) {
99 std::string InventedName;
100 llvm::raw_string_ostream OS(InventedName);
101
102 if (!ParamName)
103 OS << "auto:" << Index + 1;
104 else
105 OS << ParamName->getName() << ":auto";
106
107 OS.flush();
108 return &Context.Idents.get(Name: OS.str());
109}
110
111PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
112 const Preprocessor &PP) {
113 PrintingPolicy Policy = Context.getPrintingPolicy();
114 // In diagnostics, we print _Bool as bool if the latter is defined as the
115 // former.
116 Policy.Bool = Context.getLangOpts().Bool;
117 if (!Policy.Bool) {
118 if (const MacroInfo *BoolMacro = PP.getMacroInfo(II: Context.getBoolName())) {
119 Policy.Bool = BoolMacro->isObjectLike() &&
120 BoolMacro->getNumTokens() == 1 &&
121 BoolMacro->getReplacementToken(Tok: 0).is(K: tok::kw__Bool);
122 }
123 }
124
125 // Shorten the data output if needed
126 Policy.EntireContentsOfLargeArray = false;
127
128 return Policy;
129}
130
/// Record the translation-unit scope and enter the TU's DeclContext.
/// Called once at the start of parsing a translation unit.
void Sema::ActOnTranslationUnitScope(Scope *S) {
  TUScope = S;
  PushDeclContext(S, Context.getTranslationUnitDecl());
}
135
namespace clang {
namespace sema {

/// Preprocessor callback hook that notifies Sema of file enter/exit events.
/// It maintains a stack of include locations so that, on entering a file, a
/// non-default '#pragma align/pack' state can be diagnosed, and on exiting a
/// file, a state changed within that file can be diagnosed. When -ftime-trace
/// is enabled it also brackets each included source file with an async
/// "Source" profiler event.
class SemaPPCallbacks : public PPCallbacks {
  Sema *S = nullptr;
  // Include location of each file currently being entered, innermost last.
  llvm::SmallVector<SourceLocation, 8> IncludeStack;
  // Open -ftime-trace entries, parallel to IncludeStack.
  llvm::SmallVector<llvm::TimeTraceProfilerEntry *, 8> ProfilerStack;

public:
  void set(Sema &S) { this->S = &S; }

  // Called from ~Sema(); the callbacks object itself outlives Sema because it
  // is owned by the preprocessor.
  void reset() { S = nullptr; }

  void FileChanged(SourceLocation Loc, FileChangeReason Reason,
                   SrcMgr::CharacteristicKind FileType,
                   FileID PrevFID) override {
    if (!S)
      return;
    switch (Reason) {
    case EnterFile: {
      SourceManager &SM = S->getSourceManager();
      // The main file has no include location; only track real includes.
      SourceLocation IncludeLoc = SM.getIncludeLoc(FID: SM.getFileID(SpellingLoc: Loc));
      if (IncludeLoc.isValid()) {
        if (llvm::timeTraceProfilerEnabled()) {
          OptionalFileEntryRef FE = SM.getFileEntryRefForID(FID: SM.getFileID(SpellingLoc: Loc));
          ProfilerStack.push_back(Elt: llvm::timeTraceAsyncProfilerBegin(
              Name: "Source", Detail: FE ? FE->getName() : StringRef("<unknown>")));
        }

        IncludeStack.push_back(Elt: IncludeLoc);
        S->DiagnoseNonDefaultPragmaAlignPack(
            Kind: Sema::PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude,
            IncludeLoc);
      }
      break;
    }
    case ExitFile:
      // Pop only if EnterFile pushed (i.e. this was a tracked include).
      if (!IncludeStack.empty()) {
        if (llvm::timeTraceProfilerEnabled())
          llvm::timeTraceProfilerEnd(E: ProfilerStack.pop_back_val());

        S->DiagnoseNonDefaultPragmaAlignPack(
            Kind: Sema::PragmaAlignPackDiagnoseKind::ChangedStateAtExit,
            IncludeLoc: IncludeStack.pop_back_val());
      }
      break;
    default:
      break;
    }
  }
};

} // end namespace sema
} // end namespace clang
190
// Out-of-line definitions for Sema's static constexpr data members —
// presumably needed so ODR-uses link under pre-C++17 semantics; TODO(review):
// confirm these can be dropped once the minimum language mode makes the
// in-class constexpr declarations implicitly inline.
const unsigned Sema::MaxAlignmentExponent;
const uint64_t Sema::MaximumAlignment;
193
/// Construct a Sema. Most state is value-initialized in the member-init list;
/// the language-mode sub-Semas (CUDA/HLSL/OpenACC/OpenMP/SYCL) are allocated
/// eagerly, and a PPCallbacks hook is registered with the preprocessor so
/// Sema observes file enter/exit events.
Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
           TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter)
    : SemaBase(*this), CollectStats(false), TUKind(TUKind),
      CurFPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp),
      Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()),
      SourceMgr(PP.getSourceManager()), APINotes(SourceMgr, LangOpts),
      AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr),
      LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr),
      OpaqueParser(nullptr), CurContext(nullptr), ExternalSource(nullptr),
      CurScope(nullptr), Ident_super(nullptr),
      CUDAPtr(std::make_unique<SemaCUDA>(args&: *this)),
      HLSLPtr(std::make_unique<SemaHLSL>(args&: *this)),
      OpenACCPtr(std::make_unique<SemaOpenACC>(args&: *this)),
      OpenMPPtr(std::make_unique<SemaOpenMP>(args&: *this)),
      SYCLPtr(std::make_unique<SemaSYCL>(args&: *this)),
      MSPointerToMemberRepresentationMethod(
          LangOpts.getMSPointerToMemberRepresentationMethod()),
      MSStructPragmaOn(false), VtorDispStack(LangOpts.getVtorDispMode()),
      AlignPackStack(AlignPackInfo(getLangOpts().XLPragmaPack)),
      DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr),
      CodeSegStack(nullptr), StrictGuardStackCheckStack(false),
      FpPragmaStack(FPOptionsOverride()), CurInitSeg(nullptr),
      VisContext(nullptr), PragmaAttributeCurrentTargetDecl(nullptr),
      StdCoroutineTraitsCache(nullptr), IdResolver(pp),
      OriginalLexicalContext(nullptr), StdInitializerList(nullptr),
      FullyCheckedComparisonCategories(
          static_cast<unsigned>(ComparisonCategoryType::Last) + 1),
      StdSourceLocationImplDecl(nullptr), CXXTypeInfoDecl(nullptr),
      GlobalNewDeleteDeclared(false), DisableTypoCorrection(false),
      TyposCorrected(0), IsBuildingRecoveryCallExpr(false), NumSFINAEErrors(0),
      AccessCheckingSFINAE(false), CurrentInstantiationScope(nullptr),
      InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0),
      ArgumentPackSubstitutionIndex(-1), SatisfactionCache(Context),
      NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
      StringWithUTF8StringMethod(nullptr),
      ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
      ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
      DictionaryWithObjectsMethod(nullptr), CodeCompleter(CodeCompleter) {
  // The preprocessor must have been configured for the same kind of TU.
  assert(pp.TUKind == TUKind);
  TUScope = nullptr;

  LoadedExternalKnownNamespaces = false;
  for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
    NSNumberLiteralMethods[I] = nullptr;

  if (getLangOpts().ObjC)
    NSAPIObj.reset(p: new NSAPI(Context));

  if (getLangOpts().CPlusPlus)
    FieldCollector.reset(p: new CXXFieldCollector());

  // Tell diagnostics how to render things from the AST library.
  Diags.SetArgToStringFn(Fn: &FormatASTNodeDiagnosticArgument, Cookie: &Context);

  // This evaluation context exists to ensure that there's always at least one
  // valid evaluation context available. It is never removed from the
  // evaluation stack.
  ExprEvalContexts.emplace_back(
      Args: ExpressionEvaluationContext::PotentiallyEvaluated, Args: 0, Args: CleanupInfo{},
      Args: nullptr, Args: ExpressionEvaluationContextRecord::EK_Other);

  // Initialization of data sharing attributes stack for OpenMP
  OpenMP().InitDataSharingAttributesStack();

  // Register the file enter/exit hook; the preprocessor takes ownership, and
  // we keep a raw pointer so ~Sema can detach (SemaPPCallbacks outlives us).
  std::unique_ptr<sema::SemaPPCallbacks> Callbacks =
      std::make_unique<sema::SemaPPCallbacks>();
  SemaPPCallbackHandler = Callbacks.get();
  PP.addPPCallbacks(C: std::move(Callbacks));
  SemaPPCallbackHandler->set(*this);

  CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
266
// Anchor Sema's type info to this TU.
// (Defining one out-of-line member here follows the usual LLVM "anchor"
// convention so the class's vtable/type info is emitted in a single object
// file rather than in every TU that includes the header.)
void Sema::anchor() {}
269
270void Sema::addImplicitTypedef(StringRef Name, QualType T) {
271 DeclarationName DN = &Context.Idents.get(Name);
272 if (IdResolver.begin(Name: DN) == IdResolver.end())
273 PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope);
274}
275
/// Perform per-translation-unit initialization: hook up the (Sema-aware)
/// consumer and external source, then seed the translation-unit scope with
/// the builtin/predefined declarations required by the active language mode
/// and target (128-bit ints, ObjC types, MSVC types, OpenCL types, target
/// vector types, va_list, etc.).
void Sema::Initialize() {
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->InitializeSema(S&: *this);

  // Tell the external Sema source about this Sema object.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->InitializeSema(S&: *this);

  // This needs to happen after ExternalSemaSource::InitializeSema(this) or we
  // will not be able to merge any duplicate __va_list_tag decls correctly.
  VAListTagName = PP.getIdentifierInfo(Name: "__va_list_tag");

  // No translation-unit scope (e.g. during some AST consumers): nothing
  // below applies.
  if (!TUScope)
    return;

  // Initialize predefined 128-bit integer types, if needed.
  if (Context.getTargetInfo().hasInt128Type() ||
      (Context.getAuxTargetInfo() &&
       Context.getAuxTargetInfo()->hasInt128Type())) {
    // If either of the 128-bit integer types are unavailable to name lookup,
    // define them now.
    DeclarationName Int128 = &Context.Idents.get(Name: "__int128_t");
    if (IdResolver.begin(Name: Int128) == IdResolver.end())
      PushOnScopeChains(Context.getInt128Decl(), TUScope);

    DeclarationName UInt128 = &Context.Idents.get(Name: "__uint128_t");
    if (IdResolver.begin(Name: UInt128) == IdResolver.end())
      PushOnScopeChains(Context.getUInt128Decl(), TUScope);
  }


  // Initialize predefined Objective-C types:
  if (getLangOpts().ObjC) {
    // If 'SEL' does not yet refer to any declarations, make it refer to the
    // predefined 'SEL'.
    DeclarationName SEL = &Context.Idents.get(Name: "SEL");
    if (IdResolver.begin(Name: SEL) == IdResolver.end())
      PushOnScopeChains(Context.getObjCSelDecl(), TUScope);

    // If 'id' does not yet refer to any declarations, make it refer to the
    // predefined 'id'.
    DeclarationName Id = &Context.Idents.get(Name: "id");
    if (IdResolver.begin(Name: Id) == IdResolver.end())
      PushOnScopeChains(Context.getObjCIdDecl(), TUScope);

    // Create the built-in typedef for 'Class'.
    DeclarationName Class = &Context.Idents.get(Name: "Class");
    if (IdResolver.begin(Name: Class) == IdResolver.end())
      PushOnScopeChains(Context.getObjCClassDecl(), TUScope);

    // Create the built-in forward declaration for 'Protocol'.
    DeclarationName Protocol = &Context.Idents.get(Name: "Protocol");
    if (IdResolver.begin(Name: Protocol) == IdResolver.end())
      PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
  }

  // Create the internal type for the *StringMakeConstantString builtins.
  DeclarationName ConstantString = &Context.Idents.get(Name: "__NSConstantString");
  if (IdResolver.begin(Name: ConstantString) == IdResolver.end())
    PushOnScopeChains(Context.getCFConstantStringDecl(), TUScope);

  // Initialize Microsoft "predefined C++ types".
  if (getLangOpts().MSVCCompat) {
    if (getLangOpts().CPlusPlus &&
        IdResolver.begin(Name: &Context.Idents.get(Name: "type_info")) == IdResolver.end())
      PushOnScopeChains(
          Context.buildImplicitRecord(Name: "type_info", TK: TagTypeKind::Class),
          TUScope);

    addImplicitTypedef(Name: "size_t", T: Context.getSizeType());
  }

  // Initialize predefined OpenCL types and supported extensions and (optional)
  // core features.
  if (getLangOpts().OpenCL) {
    getOpenCLOptions().addSupport(
        FeaturesMap: Context.getTargetInfo().getSupportedOpenCLOpts(), Opts: getLangOpts());
    addImplicitTypedef(Name: "sampler_t", T: Context.OCLSamplerTy);
    addImplicitTypedef(Name: "event_t", T: Context.OCLEventTy);
    auto OCLCompatibleVersion = getLangOpts().getOpenCLCompatibleVersion();
    if (OCLCompatibleVersion >= 200) {
      if (getLangOpts().OpenCLCPlusPlus || getLangOpts().Blocks) {
        addImplicitTypedef(Name: "clk_event_t", T: Context.OCLClkEventTy);
        addImplicitTypedef(Name: "queue_t", T: Context.OCLQueueTy);
      }
      if (getLangOpts().OpenCLPipes)
        addImplicitTypedef(Name: "reserve_id_t", T: Context.OCLReserveIDTy);
      addImplicitTypedef(Name: "atomic_int", T: Context.getAtomicType(T: Context.IntTy));
      addImplicitTypedef(Name: "atomic_uint",
                         T: Context.getAtomicType(T: Context.UnsignedIntTy));
      addImplicitTypedef(Name: "atomic_float",
                         T: Context.getAtomicType(T: Context.FloatTy));
      // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
      // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
      addImplicitTypedef(Name: "atomic_flag", T: Context.getAtomicType(T: Context.IntTy));


      // OpenCL v2.0 s6.13.11.6:
      // - The atomic_long and atomic_ulong types are supported if the
      //   cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
      //   extensions are supported.
      // - The atomic_double type is only supported if double precision
      //   is supported and the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.
      // - If the device address space is 64-bits, the data types
      //   atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
      //   atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
      //   cl_khr_int64_extended_atomics extensions are supported.

      auto AddPointerSizeDependentTypes = [&]() {
        auto AtomicSizeT = Context.getAtomicType(T: Context.getSizeType());
        auto AtomicIntPtrT = Context.getAtomicType(T: Context.getIntPtrType());
        auto AtomicUIntPtrT = Context.getAtomicType(T: Context.getUIntPtrType());
        auto AtomicPtrDiffT =
            Context.getAtomicType(T: Context.getPointerDiffType());
        addImplicitTypedef(Name: "atomic_size_t", T: AtomicSizeT);
        addImplicitTypedef(Name: "atomic_intptr_t", T: AtomicIntPtrT);
        addImplicitTypedef(Name: "atomic_uintptr_t", T: AtomicUIntPtrT);
        addImplicitTypedef(Name: "atomic_ptrdiff_t", T: AtomicPtrDiffT);
      };

      if (Context.getTypeSize(T: Context.getSizeType()) == 32) {
        AddPointerSizeDependentTypes();
      }

      if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp16", LO: getLangOpts())) {
        auto AtomicHalfT = Context.getAtomicType(T: Context.HalfTy);
        addImplicitTypedef(Name: "atomic_half", T: AtomicHalfT);
      }

      std::vector<QualType> Atomic64BitTypes;
      if (getOpenCLOptions().isSupported(Ext: "cl_khr_int64_base_atomics",
                                         LO: getLangOpts()) &&
          getOpenCLOptions().isSupported(Ext: "cl_khr_int64_extended_atomics",
                                         LO: getLangOpts())) {
        if (getOpenCLOptions().isSupported(Ext: "cl_khr_fp64", LO: getLangOpts())) {
          auto AtomicDoubleT = Context.getAtomicType(T: Context.DoubleTy);
          addImplicitTypedef(Name: "atomic_double", T: AtomicDoubleT);
          Atomic64BitTypes.push_back(AtomicDoubleT);
        }
        auto AtomicLongT = Context.getAtomicType(T: Context.LongTy);
        auto AtomicULongT = Context.getAtomicType(T: Context.UnsignedLongTy);
        addImplicitTypedef(Name: "atomic_long", T: AtomicLongT);
        addImplicitTypedef(Name: "atomic_ulong", T: AtomicULongT);


        if (Context.getTypeSize(T: Context.getSizeType()) == 64) {
          AddPointerSizeDependentTypes();
        }
      }
    }

// Declare one implicit typedef per OpenCL extension opaque type whose
// extension is supported in the current language options.
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
  if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
    addImplicitTypedef(#ExtType, Context.Id##Ty); \
  }
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  // Target-specific builtin vector types, each driven by a .def file.
  if (Context.getTargetInfo().hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Context.getTargetInfo().getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
  addImplicitTypedef(#Name, Context.Id##Ty);
#include "clang/Basic/PPCTypes.def"
  }

  if (Context.getTargetInfo().hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Context.getTargetInfo().getTriple().isWasm() &&
      Context.getTargetInfo().hasFeature(Feature: "reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
  addImplicitTypedef(Name, Context.SingletonId);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  if (Context.getTargetInfo().hasBuiltinMSVaList()) {
    DeclarationName MSVaList = &Context.Idents.get(Name: "__builtin_ms_va_list");
    if (IdResolver.begin(Name: MSVaList) == IdResolver.end())
      PushOnScopeChains(Context.getBuiltinMSVaListDecl(), TUScope);
  }

  DeclarationName BuiltinVaList = &Context.Idents.get(Name: "__builtin_va_list");
  if (IdResolver.begin(Name: BuiltinVaList) == IdResolver.end())
    PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope);
}
474
/// Tear down Sema: release owned per-function/per-scope state, detach from
/// the consumer, external source and preprocessor callbacks, and free caches.
Sema::~Sema() {
  assert(InstantiatingSpecializations.empty() &&
         "failed to clean up an InstantiatingTemplate?");

  if (VisContext) FreeVisContext();

  // Kill all the active scopes.
  for (sema::FunctionScopeInfo *FSI : FunctionScopes)
    delete FSI;

  // Tell the SemaConsumer to forget about us; we're going out of scope.
  if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Val: &Consumer))
    SC->ForgetSema();

  // Detach from the external Sema source.
  if (ExternalSemaSource *ExternalSema
      = dyn_cast_or_null<ExternalSemaSource>(Val: Context.getExternalSource()))
    ExternalSema->ForgetSema();

  // Delete cached satisfactions. The node pointers are collected first and
  // deleted in a second pass — presumably so deletion cannot disturb the
  // SatisfactionCache traversal in progress; TODO(review) confirm.
  std::vector<ConstraintSatisfaction *> Satisfactions;
  Satisfactions.reserve(n: SatisfactionCache.size());
  for (auto &Node : SatisfactionCache)
    Satisfactions.push_back(x: &Node);
  for (auto *Node : Satisfactions)
    delete Node;

  threadSafety::threadSafetyCleanup(Cache: ThreadSafetyDeclCache);

  // Destroys data sharing attributes stack for OpenMP
  OpenMP().DestroyDataSharingAttributesStack();

  // Detach from the PP callback handler which outlives Sema since it's owned
  // by the preprocessor.
  SemaPPCallbackHandler->reset();
}
511
512void Sema::warnStackExhausted(SourceLocation Loc) {
513 // Only warn about this once.
514 if (!WarnedStackExhausted) {
515 Diag(Loc, diag::warn_stack_exhausted);
516 WarnedStackExhausted = true;
517 }
518}
519
520void Sema::runWithSufficientStackSpace(SourceLocation Loc,
521 llvm::function_ref<void()> Fn) {
522 clang::runWithSufficientStackSpace(Diag: [&] { warnStackExhausted(Loc); }, Fn);
523}
524
525/// makeUnavailableInSystemHeader - There is an error in the current
526/// context. If we're still in a system header, and we can plausibly
527/// make the relevant declaration unavailable instead of erroring, do
528/// so and return true.
529bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
530 UnavailableAttr::ImplicitReason reason) {
531 // If we're not in a function, it's an error.
532 FunctionDecl *fn = dyn_cast<FunctionDecl>(Val: CurContext);
533 if (!fn) return false;
534
535 // If we're in template instantiation, it's an error.
536 if (inTemplateInstantiation())
537 return false;
538
539 // If that function's not in a system header, it's an error.
540 if (!Context.getSourceManager().isInSystemHeader(Loc: loc))
541 return false;
542
543 // If the function is already unavailable, it's not an error.
544 if (fn->hasAttr<UnavailableAttr>()) return true;
545
546 fn->addAttr(UnavailableAttr::CreateImplicit(Context, "", reason, loc));
547 return true;
548}
549
550ASTMutationListener *Sema::getASTMutationListener() const {
551 return getASTConsumer().GetASTMutationListener();
552}
553
554///Registers an external source. If an external source already exists,
555/// creates a multiplex external source and appends to it.
556///
557///\param[in] E - A non-null external sema source.
558///
559void Sema::addExternalSource(ExternalSemaSource *E) {
560 assert(E && "Cannot use with NULL ptr");
561
562 if (!ExternalSource) {
563 ExternalSource = E;
564 return;
565 }
566
567 if (auto *Ex = dyn_cast<MultiplexExternalSemaSource>(Val&: ExternalSource))
568 Ex->AddSource(Source: E);
569 else
570 ExternalSource = new MultiplexExternalSemaSource(ExternalSource.get(), E);
571}
572
573/// Print out statistics about the semantic analysis.
574void Sema::PrintStats() const {
575 llvm::errs() << "\n*** Semantic Analysis Stats:\n";
576 llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
577
578 BumpAlloc.PrintStats();
579 AnalysisWarnings.PrintStats();
580}
581
582void Sema::diagnoseNullableToNonnullConversion(QualType DstType,
583 QualType SrcType,
584 SourceLocation Loc) {
585 std::optional<NullabilityKind> ExprNullability = SrcType->getNullability();
586 if (!ExprNullability || (*ExprNullability != NullabilityKind::Nullable &&
587 *ExprNullability != NullabilityKind::NullableResult))
588 return;
589
590 std::optional<NullabilityKind> TypeNullability = DstType->getNullability();
591 if (!TypeNullability || *TypeNullability != NullabilityKind::NonNull)
592 return;
593
594 Diag(Loc, diag::warn_nullability_lost) << SrcType << DstType;
595}
596
/// Diagnose a zero constant being converted to a (member) pointer as
/// warn_zero_as_null_pointer_constant, attaching a fix-it suggesting
/// 'nullptr'. Several contexts are deliberately exempted by the guards below.
void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E) {
  // nullptr only exists from C++11 on, so don't warn on its absence earlier.
  if (!getLangOpts().CPlusPlus11)
    return;

  // Only null-to-pointer / null-to-member-pointer conversions are of interest.
  if (Kind != CK_NullToPointer && Kind != CK_NullToMemberPointer)
    return;

  // Already nullptr (possibly parenthesized/implicitly cast) or GNU __null:
  // nothing to suggest.
  const Expr *EStripped = E->IgnoreParenImpCasts();
  if (EStripped->getType()->isNullPtrType())
    return;
  if (isa<GNUNullExpr>(Val: EStripped))
    return;

  if (Diags.isIgnored(diag::warn_zero_as_null_pointer_constant,
                      E->getBeginLoc()))
    return;

  // Don't diagnose the conversion from a 0 literal to a null pointer argument
  // in a synthesized call to operator<=>.
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.back().Kind ==
          CodeSynthesisContext::RewritingOperatorAsSpaceship)
    return;

  // Ignore null pointers in defaulted comparison operators.
  FunctionDecl *FD = getCurFunctionDecl();
  if (FD && FD->isDefaulted()) {
    return;
  }

  // If it is a macro from system header, and if the macro name is not "NULL",
  // do not warn.
  // Note that uses of "NULL" will be ignored above on systems that define it
  // as __null.
  SourceLocation MaybeMacroLoc = E->getBeginLoc();
  if (Diags.getSuppressSystemWarnings() &&
      SourceMgr.isInSystemMacro(loc: MaybeMacroLoc) &&
      !findMacroSpelling(loc&: MaybeMacroLoc, name: "NULL"))
    return;

  Diag(E->getBeginLoc(), diag::warn_zero_as_null_pointer_constant)
      << FixItHint::CreateReplacement(E->getSourceRange(), "nullptr");
}
641
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
/// If there is already an implicit cast, merge into the existing one.
/// The result is of the given category.
///
/// \param E        The expression to cast.
/// \param Ty       The target type.
/// \param Kind     The cast kind to use.
/// \param VK       The value category of the result.
/// \param BasePath Inheritance path for base-class casts, if any.
/// \param CCK      How the conversion was written (unused here; kept for the
///                 public signature).
ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
                                   CastKind Kind, ExprValueKind VK,
                                   const CXXCastPath *BasePath,
                                   CheckedConversionKind CCK) {
#ifndef NDEBUG
  // Debug-only sanity checks: only a known set of cast kinds may turn a
  // glvalue into a prvalue, and (except CK_Dependent) a prvalue must not be
  // cast to a glvalue.
  if (VK == VK_PRValue && !E->isPRValue()) {
    switch (Kind) {
    default:
      llvm_unreachable(
          ("can't implicitly cast glvalue to prvalue with this cast "
           "kind: " +
           std::string(CastExpr::getCastKindName(Kind)))
              .c_str());
    case CK_Dependent:
    case CK_LValueToRValue:
    case CK_ArrayToPointerDecay:
    case CK_FunctionToPointerDecay:
    case CK_ToVoid:
    case CK_NonAtomicToAtomic:
    case CK_HLSLArrayRValue:
      break;
    }
  }
  assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
         "can't cast prvalue to glvalue");
#endif

  diagnoseNullableToNonnullConversion(DstType: Ty, SrcType: E->getType(), Loc: E->getBeginLoc());
  diagnoseZeroToNullptrConversion(Kind, E);

  // No cast needed if the canonical types already agree.
  QualType ExprTy = Context.getCanonicalType(T: E->getType());
  QualType TypeTy = Context.getCanonicalType(T: Ty);

  if (ExprTy == TypeTy)
    return E;

  if (Kind == CK_ArrayToPointerDecay) {
    // C++1z [conv.array]: The temporary materialization conversion is applied.
    // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
    if (getLangOpts().CPlusPlus && E->isPRValue()) {
      // The temporary is an lvalue in C++98 and an xvalue otherwise.
      ExprResult Materialized = CreateMaterializeTemporaryExpr(
          T: E->getType(), Temporary: E, BoundToLvalueReference: !getLangOpts().CPlusPlus11);
      if (Materialized.isInvalid())
        return ExprError();
      E = Materialized.get();
    }
    // C17 6.7.1p6 footnote 124: The implementation can treat any register
    // declaration simply as an auto declaration. However, whether or not
    // addressable storage is actually used, the address of any part of an
    // object declared with storage-class specifier register cannot be
    // computed, either explicitly(by use of the unary & operator as discussed
    // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
    // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
    // array declared with storage-class specifier register is sizeof.
    if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
      if (const auto *DRE = dyn_cast<DeclRefExpr>(Val: E)) {
        if (const auto *VD = dyn_cast<VarDecl>(Val: DRE->getDecl())) {
          if (VD->getStorageClass() == SC_Register) {
            Diag(E->getExprLoc(), diag::err_typecheck_address_of)
                << /*register variable*/ 3 << E->getSourceRange();
            return ExprError();
          }
        }
      }
    }
  }

  // Merge into an existing implicit cast of the same kind (unless a base
  // path is required) instead of stacking a second node.
  if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(Val: E)) {
    if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
      ImpCast->setType(Ty);
      ImpCast->setValueKind(VK);
      return E;
    }
  }

  return ImplicitCastExpr::Create(Context, T: Ty, Kind, Operand: E, BasePath, Cat: VK,
                                  FPO: CurFPFeatureOverrides());
}
724
725/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
726/// to the conversion from scalar type ScalarTy to the Boolean type.
727CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
728 switch (ScalarTy->getScalarTypeKind()) {
729 case Type::STK_Bool: return CK_NoOp;
730 case Type::STK_CPointer: return CK_PointerToBoolean;
731 case Type::STK_BlockPointer: return CK_PointerToBoolean;
732 case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
733 case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
734 case Type::STK_Integral: return CK_IntegralToBoolean;
735 case Type::STK_Floating: return CK_FloatingToBoolean;
736 case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
737 case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
738 case Type::STK_FixedPoint: return CK_FixedPointToBoolean;
739 }
740 llvm_unreachable("unknown scalar type kind");
741}
742
/// Used to prune the decls of Sema's UnusedFileScopedDecls vector.
/// Returns true when \p D should no longer be considered "unused" — i.e. a
/// later declaration, specialization, or definition shows no warning is
/// warranted.
static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
  if (D->getMostRecentDecl()->isUsed())
    return true;

  if (D->isExternallyVisible())
    return true;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: D)) {
    // If this is a function template and none of its specializations is used,
    // we should warn.
    if (FunctionTemplateDecl *Template = FD->getDescribedFunctionTemplate())
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const FunctionDecl *DeclToCheck;
    if (FD->hasBody(Definition&: DeclToCheck))
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = FD->getMostRecentDecl();
    if (DeclToCheck != FD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(Val: D)) {
    // If a variable usable in constant expressions is referenced,
    // don't warn if it isn't used: if the value of a variable is required
    // for the computation of a constant expression, it doesn't make sense to
    // warn even if the variable isn't odr-used. (isReferenced doesn't
    // precisely reflect that, but it's a decent approximation.)
    if (VD->isReferenced() &&
        VD->mightBeUsableInConstantExpressions(C: SemaRef->Context))
      return true;

    if (VarTemplateDecl *Template = VD->getDescribedVarTemplate())
      // If this is a variable template and none of its specializations is used,
      // we should warn.
      for (const auto *Spec : Template->specializations())
        if (ShouldRemoveFromUnused(SemaRef, Spec))
          return true;

    // UnusedFileScopedDecls stores the first declaration.
    // The declaration may have become definition so check again.
    const VarDecl *DeclToCheck = VD->getDefinition();
    if (DeclToCheck)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);

    // Later redecls may add new information resulting in not having to warn,
    // so check again.
    DeclToCheck = VD->getMostRecentDecl();
    if (DeclToCheck != VD)
      return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
  }

  // Still plausibly unused: keep it in the list.
  return false;
}
804
805static bool isFunctionOrVarDeclExternC(const NamedDecl *ND) {
806 if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND))
807 return FD->isExternC();
808 return cast<VarDecl>(Val: ND)->isExternC();
809}
810
811/// Determine whether ND is an external-linkage function or variable whose
812/// type has no linkage.
813bool Sema::isExternalWithNoLinkageType(const ValueDecl *VD) const {
814 // Note: it's not quite enough to check whether VD has UniqueExternalLinkage,
815 // because we also want to catch the case where its type has VisibleNoLinkage,
816 // which does not affect the linkage of VD.
817 return getLangOpts().CPlusPlus && VD->hasExternalFormalLinkage() &&
818 !isExternalFormalLinkage(L: VD->getType()->getLinkage()) &&
819 !isFunctionOrVarDeclExternC(VD);
820}
821
/// Obtains a sorted list of functions and variables that are undefined but
/// ODR-used.
///
/// Filters Sema::UndefinedButUsed down to the entries that genuinely require
/// a definition in this TU; each surviving (decl, first-use-location) pair is
/// appended to \p Undefined.
void Sema::getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) {
  for (const auto &UndefinedUse : UndefinedButUsed) {
    NamedDecl *ND = UndefinedUse.first;

    // Ignore attributes that have become invalid.
    if (ND->isInvalidDecl()) continue;

    // __attribute__((weakref)) is basically a definition.
    if (ND->hasAttr<WeakRefAttr>()) continue;

    // Deduction guides are never emitted, so they never need a definition.
    if (isa<CXXDeductionGuideDecl>(Val: ND))
      continue;

    if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) {
      // An exported function will always be emitted when defined, so even if
      // the function is inline, it doesn't have to be emitted in this TU. An
      // imported function implies that it has been exported somewhere else.
      continue;
    }

    if (const auto *FD = dyn_cast<FunctionDecl>(Val: ND)) {
      if (FD->isDefined())
        continue;
      // A non-inline externally visible function may be defined in another
      // TU, unless its type has no linkage (see isExternalWithNoLinkageType).
      if (FD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(FD) &&
          !FD->getMostRecentDecl()->isInlined() &&
          !FD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;
      // Builtins are supplied by the compiler or runtime.
      if (FD->getBuiltinID())
        continue;
    } else {
      const auto *VD = cast<VarDecl>(Val: ND);
      // A declaration that is a definition (or tentative definition) is fine.
      if (VD->hasDefinition() != VarDecl::DeclarationOnly)
        continue;
      if (VD->isExternallyVisible() &&
          !isExternalWithNoLinkageType(VD) &&
          !VD->getMostRecentDecl()->isInline() &&
          !VD->hasAttr<ExcludeFromExplicitInstantiationAttr>())
        continue;

      // Skip VarDecls that lack formal definitions but which we know are in
      // fact defined somewhere.
      if (VD->isKnownToBeDefined())
        continue;
    }

    Undefined.push_back(Elt: std::make_pair(x&: ND, y: UndefinedUse.second));
  }
}
874
/// checkUndefinedButUsed - Check for undefined objects with internal linkage
/// or that are inline.
///
/// Emits one diagnostic per offending entity (severity depends on why the
/// entity needs a definition here) plus a note at the first point of use.
static void checkUndefinedButUsed(Sema &S) {
  if (S.UndefinedButUsed.empty()) return;

  // Collect all the still-undefined entities with internal linkage.
  SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined;
  S.getUndefinedButUsed(Undefined);
  S.UndefinedButUsed.clear();
  if (Undefined.empty()) return;

  for (const auto &Undef : Undefined) {
    ValueDecl *VD = cast<ValueDecl>(Val: Undef.first);
    SourceLocation UseLoc = Undef.second;

    if (S.isExternalWithNoLinkageType(VD)) {
      // C++ [basic.link]p8:
      //   A type without linkage shall not be used as the type of a variable
      //   or function with external linkage unless
      //    -- the entity has C language linkage
      //    -- the entity is not odr-used or is defined in the same TU
      //
      // As an extension, accept this in cases where the type is externally
      // visible, since the function or variable actually can be defined in
      // another translation unit in that case.
      S.Diag(VD->getLocation(), isExternallyVisible(VD->getType()->getLinkage())
                                    ? diag::ext_undefined_internal_type
                                    : diag::err_undefined_internal_type)
          << isa<VarDecl>(VD) << VD;
    } else if (!VD->isExternallyVisible()) {
      // FIXME: We can promote this to an error. The function or variable can't
      // be defined anywhere else, so the program must necessarily violate the
      // one definition rule.
      bool IsImplicitBase = false;
      if (const auto *BaseD = dyn_cast<FunctionDecl>(Val: VD)) {
        // OpenMP 'declare variant' may synthesize an implicit base function;
        // such bases are not expected to have a definition, so suppress the
        // warning for them (unless the extension disabling implicit bases is
        // active).
        auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
        if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
                          llvm::omp::TraitProperty::
                              implementation_extension_disable_implicit_base)) {
          const auto *Func = cast<FunctionDecl>(
              cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
          IsImplicitBase = BaseD->isImplicit() &&
                           Func->getIdentifier()->isMangledOpenMPVariantName();
        }
      }
      if (!S.getLangOpts().OpenMP || !IsImplicitBase)
        S.Diag(VD->getLocation(), diag::warn_undefined_internal)
            << isa<VarDecl>(VD) << VD;
    } else if (auto *FD = dyn_cast<FunctionDecl>(Val: VD)) {
      (void)FD;
      assert(FD->getMostRecentDecl()->isInlined() &&
             "used object requires definition but isn't inline or internal?");
      // FIXME: This is ill-formed; we should reject.
      S.Diag(VD->getLocation(), diag::warn_undefined_inline) << VD;
    } else {
      // Only remaining possibility: an externally visible inline variable.
      assert(cast<VarDecl>(VD)->getMostRecentDecl()->isInline() &&
             "used var requires definition but isn't inline or internal?");
      S.Diag(VD->getLocation(), diag::err_undefined_inline_var) << VD;
    }
    if (UseLoc.isValid())
      S.Diag(UseLoc, diag::note_used_here);
  }
}
938
939void Sema::LoadExternalWeakUndeclaredIdentifiers() {
940 if (!ExternalSource)
941 return;
942
943 SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
944 ExternalSource->ReadWeakUndeclaredIdentifiers(WI&: WeakIDs);
945 for (auto &WeakID : WeakIDs)
946 (void)WeakUndeclaredIdentifiers[WeakID.first].insert(X: WeakID.second);
947}
948
949
// Memoization cache for the record-completeness helpers below: maps a
// CXXRecordDecl to whether it was found to be fully defined in this TU.
typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap;
951
952/// Returns true, if all methods and nested classes of the given
953/// CXXRecordDecl are defined in this translation unit.
954///
955/// Should only be called from ActOnEndOfTranslationUnit so that all
956/// definitions are actually read.
957static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD,
958 RecordCompleteMap &MNCComplete) {
959 RecordCompleteMap::iterator Cache = MNCComplete.find(Val: RD);
960 if (Cache != MNCComplete.end())
961 return Cache->second;
962 if (!RD->isCompleteDefinition())
963 return false;
964 bool Complete = true;
965 for (DeclContext::decl_iterator I = RD->decls_begin(),
966 E = RD->decls_end();
967 I != E && Complete; ++I) {
968 if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Val: *I))
969 Complete = M->isDefined() || M->isDefaulted() ||
970 (M->isPureVirtual() && !isa<CXXDestructorDecl>(Val: M));
971 else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(Val: *I))
972 // If the template function is marked as late template parsed at this
973 // point, it has not been instantiated and therefore we have not
974 // performed semantic analysis on it yet, so we cannot know if the type
975 // can be considered complete.
976 Complete = !F->getTemplatedDecl()->isLateTemplateParsed() &&
977 F->getTemplatedDecl()->isDefined();
978 else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(Val: *I)) {
979 if (R->isInjectedClassName())
980 continue;
981 if (R->hasDefinition())
982 Complete = MethodsAndNestedClassesComplete(RD: R->getDefinition(),
983 MNCComplete);
984 else
985 Complete = false;
986 }
987 }
988 MNCComplete[RD] = Complete;
989 return Complete;
990}
991
992/// Returns true, if the given CXXRecordDecl is fully defined in this
993/// translation unit, i.e. all methods are defined or pure virtual and all
994/// friends, friend functions and nested classes are fully defined in this
995/// translation unit.
996///
997/// Should only be called from ActOnEndOfTranslationUnit so that all
998/// definitions are actually read.
999static bool IsRecordFullyDefined(const CXXRecordDecl *RD,
1000 RecordCompleteMap &RecordsComplete,
1001 RecordCompleteMap &MNCComplete) {
1002 RecordCompleteMap::iterator Cache = RecordsComplete.find(Val: RD);
1003 if (Cache != RecordsComplete.end())
1004 return Cache->second;
1005 bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete);
1006 for (CXXRecordDecl::friend_iterator I = RD->friend_begin(),
1007 E = RD->friend_end();
1008 I != E && Complete; ++I) {
1009 // Check if friend classes and methods are complete.
1010 if (TypeSourceInfo *TSI = (*I)->getFriendType()) {
1011 // Friend classes are available as the TypeSourceInfo of the FriendDecl.
1012 if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl())
1013 Complete = MethodsAndNestedClassesComplete(RD: FriendD, MNCComplete);
1014 else
1015 Complete = false;
1016 } else {
1017 // Friend functions are available through the NamedDecl of FriendDecl.
1018 if (const FunctionDecl *FD =
1019 dyn_cast<FunctionDecl>(Val: (*I)->getFriendDecl()))
1020 Complete = FD->isDefined();
1021 else
1022 // This is a template friend, give up.
1023 Complete = false;
1024 }
1025 }
1026 RecordsComplete[RD] = Complete;
1027 return Complete;
1028}
1029
1030void Sema::emitAndClearUnusedLocalTypedefWarnings() {
1031 if (ExternalSource)
1032 ExternalSource->ReadUnusedLocalTypedefNameCandidates(
1033 Decls&: UnusedLocalTypedefNameCandidates);
1034 for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) {
1035 if (TD->isReferenced())
1036 continue;
1037 Diag(TD->getLocation(), diag::warn_unused_local_typedef)
1038 << isa<TypeAliasDecl>(TD) << TD->getDeclName();
1039 }
1040 UnusedLocalTypedefNameCandidates.clear();
1041}
1042
1043/// This is called before the very first declaration in the translation unit
1044/// is parsed. Note that the ASTContext may have already injected some
1045/// declarations.
1046void Sema::ActOnStartOfTranslationUnit() {
1047 if (getLangOpts().CPlusPlusModules &&
1048 getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
1049 HandleStartOfHeaderUnit();
1050}
1051
/// Perform the end-of-fragment work shared by normal TUs and private module
/// fragments: flush late-parsed/pending template instantiations, define used
/// vtables, run the instantiations, and emit deferred diagnostics.
void Sema::ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind) {
  // No explicit actions are required at the end of the global module fragment.
  if (Kind == TUFragmentKind::Global)
    return;

  // Transfer late parsed template instantiations over to the pending template
  // instantiation list. During normal compilation, the late template parser
  // will be installed and instantiating these templates will succeed.
  //
  // If we are building a TU prefix for serialization, it is also safe to
  // transfer these over, even though they are not parsed. The end of the TU
  // should be outside of any eager template instantiation scope, so when this
  // AST is deserialized, these templates will not be parsed until the end of
  // the combined TU.
  PendingInstantiations.insert(position: PendingInstantiations.end(),
                               first: LateParsedInstantiations.begin(),
                               last: LateParsedInstantiations.end());
  LateParsedInstantiations.clear();

  // If DefinedUsedVTables ends up marking any virtual member functions it
  // might lead to more pending template instantiations, which we then need
  // to instantiate.
  DefineUsedVTables();

  // C++: Perform implicit template instantiations.
  //
  // FIXME: When we perform these implicit instantiations, we do not
  // carefully keep track of the point of instantiation (C++ [temp.point]).
  // This means that name lookup that occurs within the template
  // instantiation will always happen at the end of the translation unit,
  // so it will find some names that are not required to be found. This is
  // valid, but we could do better by diagnosing if an instantiation uses a
  // name that was not visible at its first point of instantiation.
  if (ExternalSource) {
    // Load pending instantiations from the external source.
    SmallVector<PendingImplicitInstantiation, 4> Pending;
    ExternalSource->ReadPendingInstantiations(Pending);
    for (auto PII : Pending)
      if (auto Func = dyn_cast<FunctionDecl>(Val: PII.first))
        Func->setInstantiationIsPending(true);
    PendingInstantiations.insert(position: PendingInstantiations.begin(),
                                 first: Pending.begin(), last: Pending.end());
  }

  {
    llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
    PerformPendingInstantiations();
  }

  // Flush diagnostics that were deferred during (e.g. device-side) analysis.
  emitDeferredDiags();

  assert(LateParsedInstantiations.empty() &&
         "end of TU template instantiation should not create more "
         "late-parsed templates");

  // Report diagnostics for uncorrected delayed typos. Ideally all of them
  // should have been corrected by that time, but it is very hard to cover all
  // cases in practice.
  for (const auto &Typo : DelayedTypos) {
    // We pass an empty TypoCorrection to indicate no correction was performed.
    Typo.second.DiagHandler(TypoCorrection());
  }
  DelayedTypos.clear();
}
1116
/// ActOnEndOfTranslationUnit - This is called at the very end of the
/// translation unit when EOF is reached and all but the top-level scope is
/// popped.
///
/// Drives all end-of-TU semantic checks: fragment finalization, pragma
/// termination diagnostics, #pragma weak validation, undefined-but-used
/// checks, module initializer/export resolution, C tentative-definition
/// completion, and the various "unused entity" warnings.
void Sema::ActOnEndOfTranslationUnit() {
  assert(DelayedDiagnostics.getCurrentPool() == nullptr
         && "reached end of translation unit with a pool attached?");

  // If code completion is enabled, don't perform any end-of-translation-unit
  // work.
  if (PP.isCodeCompletionEnabled())
    return;

  // Complete translation units and modules define vtables and perform implicit
  // instantiations. PCH files do not.
  if (TUKind != TU_Prefix) {
    DiagnoseUseOfUnimplementedSelectors();

    // A trailing private module fragment is finalized as its own fragment.
    ActOnEndOfTranslationUnitFragment(
        Kind: !ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                       Module::PrivateModuleFragment
            ? TUFragmentKind::Private
            : TUFragmentKind::Normal);

    if (LateTemplateParserCleanup)
      LateTemplateParserCleanup(OpaqueParser);

    CheckDelayedMemberExceptionSpecs();
  } else {
    // If we are building a TU prefix for serialization, it is safe to transfer
    // these over, even though they are not parsed. The end of the TU should be
    // outside of any eager template instantiation scope, so when this AST is
    // deserialized, these templates will not be parsed until the end of the
    // combined TU.
    PendingInstantiations.insert(position: PendingInstantiations.end(),
                                 first: LateParsedInstantiations.begin(),
                                 last: LateParsedInstantiations.end());
    LateParsedInstantiations.clear();

    if (LangOpts.PCHInstantiateTemplates) {
      llvm::TimeTraceScope TimeScope("PerformPendingInstantiations");
      PerformPendingInstantiations();
    }
  }

  DiagnoseUnterminatedPragmaAlignPack();
  DiagnoseUnterminatedPragmaAttribute();
  OpenMP().DiagnoseUnterminatedOpenMPDeclareTarget();

  // All delayed member exception specs should be checked or we end up accepting
  // incompatible declarations.
  assert(DelayedOverridingExceptionSpecChecks.empty());
  assert(DelayedEquivalentExceptionSpecChecks.empty());

  // All dllexport classes should have been processed already.
  assert(DelayedDllExportClasses.empty());
  assert(DelayedDllExportMemberFunctions.empty());

  // Remove file scoped decls that turned out to be used.
  UnusedFileScopedDecls.erase(
      From: std::remove_if(first: UnusedFileScopedDecls.begin(source: nullptr, LocalOnly: true),
                     last: UnusedFileScopedDecls.end(),
                     pred: [this](const DeclaratorDecl *DD) {
                       return ShouldRemoveFromUnused(SemaRef: this, D: DD);
                     }),
      To: UnusedFileScopedDecls.end());

  if (TUKind == TU_Prefix) {
    // Translation unit prefixes don't need any of the checking below.
    if (!PP.isIncrementalProcessingEnabled())
      TUScope = nullptr;
    return;
  }

  // Check for #pragma weak identifiers that were never declared
  LoadExternalWeakUndeclaredIdentifiers();
  for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
    if (WeakIDs.second.empty())
      continue;

    Decl *PrevDecl = LookupSingleName(S: TUScope, Name: WeakIDs.first, Loc: SourceLocation(),
                                      NameKind: LookupOrdinaryName);
    // A matching decl that is neither a function nor a variable means 'weak'
    // was applied to the wrong kind of entity; otherwise it was never
    // declared at all.
    if (PrevDecl != nullptr &&
        !(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
            << "'weak'" << /*isRegularKeyword=*/0 << ExpectedVariableOrFunction;
    else
      for (const auto &WI : WeakIDs.second)
        Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
            << WeakIDs.first;
  }

  if (LangOpts.CPlusPlus11 &&
      !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation()))
    CheckDelegatingCtorCycles();

  if (!Diags.hasErrorOccurred()) {
    if (ExternalSource)
      ExternalSource->ReadUndefinedButUsed(Undefined&: UndefinedButUsed);
    checkUndefinedButUsed(S&: *this);
  }

  // A global-module-fragment is only permitted within a module unit.
  if (!ModuleScopes.empty() && ModuleScopes.back().Module->Kind ==
                                   Module::ExplicitGlobalModuleFragment) {
    Diag(ModuleScopes.back().BeginLoc,
         diag::err_module_declaration_missing_after_global_module_introducer);
  }

  // Now we can decide whether the modules we're building need an initializer.
  if (Module *CurrentModule = getCurrentModule();
      CurrentModule && CurrentModule->isInterfaceOrPartition()) {
    // A module needs an initializer if it (or anything it exports/imports)
    // has module initializers of its own.
    auto DoesModNeedInit = [this](Module *M) {
      if (!getASTContext().getModuleInitializers(M).empty())
        return true;
      for (auto [Exported, _] : M->Exports)
        if (Exported->isNamedModuleInterfaceHasInit())
          return true;
      for (Module *I : M->Imports)
        if (I->isNamedModuleInterfaceHasInit())
          return true;

      return false;
    };

    CurrentModule->NamedModuleHasInit =
        DoesModNeedInit(CurrentModule) ||
        llvm::any_of(Range: CurrentModule->submodules(),
                     P: [&](auto *SubM) { return DoesModNeedInit(SubM); });
  }

  if (TUKind == TU_ClangModule) {
    // If we are building a module, resolve all of the exported declarations
    // now.
    if (Module *CurrentModule = PP.getCurrentModule()) {
      ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();

      // Depth-first walk over the module and its submodules.
      SmallVector<Module *, 2> Stack;
      Stack.push_back(Elt: CurrentModule);
      while (!Stack.empty()) {
        Module *Mod = Stack.pop_back_val();

        // Resolve the exported declarations and conflicts.
        // FIXME: Actually complain, once we figure out how to teach the
        // diagnostic client to deal with complaints in the module map at this
        // point.
        ModMap.resolveExports(Mod, /*Complain=*/false);
        ModMap.resolveUses(Mod, /*Complain=*/false);
        ModMap.resolveConflicts(Mod, /*Complain=*/false);

        // Queue the submodules, so their exports will also be resolved.
        auto SubmodulesRange = Mod->submodules();
        Stack.append(in_start: SubmodulesRange.begin(), in_end: SubmodulesRange.end());
      }
    }

    // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for
    // modules when they are built, not every time they are used.
    emitAndClearUnusedLocalTypedefWarnings();
  }

  // C++ standard modules. Diagnose cases where a function is declared inline
  // in the module purview but has no definition before the end of the TU or
  // the start of a Private Module Fragment (if one is present).
  if (!PendingInlineFuncDecls.empty()) {
    for (auto *D : PendingInlineFuncDecls) {
      if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
        bool DefInPMF = false;
        if (auto *FDD = FD->getDefinition()) {
          DefInPMF = FDD->getOwningModule()->isPrivateModule();
          // A definition outside the PMF satisfies the requirement; only a
          // PMF-local definition is still an error (not reachable to
          // importers).
          if (!DefInPMF)
            continue;
        }
        Diag(FD->getLocation(), diag::err_export_inline_not_defined)
            << DefInPMF;
        // If we have a PMF it should be at the end of the ModuleScopes.
        if (DefInPMF &&
            ModuleScopes.back().Module->Kind == Module::PrivateModuleFragment) {
          Diag(ModuleScopes.back().BeginLoc,
               diag::note_private_module_fragment);
        }
      }
    }
    PendingInlineFuncDecls.clear();
  }

  // C99 6.9.2p2:
  //   A declaration of an identifier for an object that has file
  //   scope without an initializer, and without a storage-class
  //   specifier or with the storage-class specifier static,
  //   constitutes a tentative definition. If a translation unit
  //   contains one or more tentative definitions for an identifier,
  //   and the translation unit contains no external definition for
  //   that identifier, then the behavior is exactly as if the
  //   translation unit contains a file scope declaration of that
  //   identifier, with the composite type as of the end of the
  //   translation unit, with an initializer equal to 0.
  llvm::SmallSet<VarDecl *, 32> Seen;
  for (TentativeDefinitionsType::iterator
           T = TentativeDefinitions.begin(source: ExternalSource.get()),
           TEnd = TentativeDefinitions.end();
       T != TEnd; ++T) {
    VarDecl *VD = (*T)->getActingDefinition();

    // If the tentative definition was completed, getActingDefinition() returns
    // null. If we've already seen this variable before, insert()'s second
    // return value is false.
    if (!VD || VD->isInvalidDecl() || !Seen.insert(Ptr: VD).second)
      continue;

    if (const IncompleteArrayType *ArrayT
        = Context.getAsIncompleteArrayType(T: VD->getType())) {
      // Set the length of the array to 1 (C99 6.9.2p5).
      Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
      llvm::APInt One(Context.getTypeSize(T: Context.getSizeType()), true);
      QualType T = Context.getConstantArrayType(
          EltTy: ArrayT->getElementType(), ArySize: One, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
      VD->setType(T);
    } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
                                   diag::err_tentative_def_incomplete_type))
      VD->setInvalidDecl();

    // No initialization is performed for a tentative definition.
    CheckCompleteVariableDeclaration(VD);

    // Notify the consumer that we've completed a tentative definition.
    if (!VD->isInvalidDecl())
      Consumer.CompleteTentativeDefinition(D: VD);
  }

  for (auto *D : ExternalDeclarations) {
    if (!D || D->isInvalidDecl() || D->getPreviousDecl() || !D->isUsed())
      continue;

    Consumer.CompleteExternalDeclaration(D);
  }

  // If there were errors, disable 'unused' warnings since they will mostly be
  // noise. Don't warn for a use from a module: either we should warn on all
  // file-scope declarations in modules or not at all, but whether the
  // declaration is used is immaterial.
  if (!Diags.hasErrorOccurred() && TUKind != TU_ClangModule) {
    // Output warning for unused file scoped decls.
    for (UnusedFileScopedDeclsType::iterator
             I = UnusedFileScopedDecls.begin(source: ExternalSource.get()),
             E = UnusedFileScopedDecls.end();
         I != E; ++I) {
      if (ShouldRemoveFromUnused(SemaRef: this, D: *I))
        continue;

      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Val: *I)) {
        // Prefer diagnosing at the definition, if one exists.
        const FunctionDecl *DiagD;
        if (!FD->hasBody(Definition&: DiagD))
          DiagD = FD;
        if (DiagD->isDeleted())
          continue; // Deleted functions are supposed to be unused.
        SourceRange DiagRange = DiagD->getLocation();
        if (const ASTTemplateArgumentListInfo *ASTTAL =
                DiagD->getTemplateSpecializationArgsAsWritten())
          DiagRange.setEnd(ASTTAL->RAngleLoc);
        if (DiagD->isReferenced()) {
          // Referenced but not odr-used: "unneeded" rather than "unused".
          if (isa<CXXMethodDecl>(Val: DiagD))
            Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
                << DiagD << DiagRange;
          else {
            if (FD->getStorageClass() == SC_Static &&
                !FD->isInlineSpecified() &&
                !SourceMgr.isInMainFile(
                    SourceMgr.getExpansionLoc(FD->getLocation())))
              Diag(DiagD->getLocation(),
                   diag::warn_unneeded_static_internal_decl)
                  << DiagD << DiagRange;
            else
              Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
                  << /*function=*/0 << DiagD << DiagRange;
          }
        } else if (!FD->isTargetMultiVersion() ||
                   FD->isTargetMultiVersionDefault()) {
          if (FD->getDescribedFunctionTemplate())
            Diag(DiagD->getLocation(), diag::warn_unused_template)
                << /*function=*/0 << DiagD << DiagRange;
          else
            Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD)
                                           ? diag::warn_unused_member_function
                                           : diag::warn_unused_function)
                << DiagD << DiagRange;
        }
      } else {
        // Variable case; again prefer the definition for the diagnostic.
        const VarDecl *DiagD = cast<VarDecl>(Val: *I)->getDefinition();
        if (!DiagD)
          DiagD = cast<VarDecl>(Val: *I);
        SourceRange DiagRange = DiagD->getLocation();
        if (const auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(Val: DiagD)) {
          if (const ASTTemplateArgumentListInfo *ASTTAL =
                  VTSD->getTemplateArgsInfo())
            DiagRange.setEnd(ASTTAL->RAngleLoc);
        }
        if (DiagD->isReferenced()) {
          Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
              << /*variable=*/1 << DiagD << DiagRange;
        } else if (DiagD->getDescribedVarTemplate()) {
          Diag(DiagD->getLocation(), diag::warn_unused_template)
              << /*variable=*/1 << DiagD << DiagRange;
        } else if (DiagD->getType().isConstQualified()) {
          const SourceManager &SM = SourceMgr;
          // Unused consts in headers are common (header-style constants), so
          // only warn for main-file consts unless compiling a header.
          if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
              !PP.getLangOpts().IsHeaderFile)
            Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
                << DiagD << DiagRange;
        } else {
          Diag(DiagD->getLocation(), diag::warn_unused_variable)
              << DiagD << DiagRange;
        }
      }
    }

    emitAndClearUnusedLocalTypedefWarnings();
  }

  if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) {
    // FIXME: Load additional unused private field candidates from the external
    // source.
    RecordCompleteMap RecordsComplete;
    RecordCompleteMap MNCComplete;
    for (const NamedDecl *D : UnusedPrivateFields) {
      // Only warn when the enclosing record is fully defined here; otherwise
      // a use could exist in an unseen method body.
      const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
      if (RD && !RD->isUnion() &&
          IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) {
        Diag(D->getLocation(), diag::warn_unused_private_field)
            << D->getDeclName();
      }
    }
  }

  if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) {
    if (ExternalSource)
      ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs);
    for (const auto &DeletedFieldInfo : DeleteExprs) {
      for (const auto &DeleteExprLoc : DeletedFieldInfo.second) {
        AnalyzeDeleteExprMismatch(Field: DeletedFieldInfo.first, DeleteLoc: DeleteExprLoc.first,
                                  DeleteWasArrayForm: DeleteExprLoc.second);
      }
    }
  }

  AnalysisWarnings.IssueWarnings(D: Context.getTranslationUnitDecl());

  // Check we've noticed that we're no longer parsing the initializer for every
  // variable. If we miss cases, then at best we have a performance issue and
  // at worst a rejects-valid bug.
  assert(ParsingInitForAutoVars.empty() &&
         "Didn't unmark var as having its initializer parsed");

  if (!PP.isIncrementalProcessingEnabled())
    TUScope = nullptr;
}
1473
1474
1475//===----------------------------------------------------------------------===//
1476// Helper functions.
1477//===----------------------------------------------------------------------===//
1478
1479DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) const {
1480 DeclContext *DC = CurContext;
1481
1482 while (true) {
1483 if (isa<BlockDecl>(Val: DC) || isa<EnumDecl>(Val: DC) || isa<CapturedDecl>(Val: DC) ||
1484 isa<RequiresExprBodyDecl>(Val: DC)) {
1485 DC = DC->getParent();
1486 } else if (!AllowLambda && isa<CXXMethodDecl>(Val: DC) &&
1487 cast<CXXMethodDecl>(Val: DC)->getOverloadedOperator() == OO_Call &&
1488 cast<CXXRecordDecl>(Val: DC->getParent())->isLambda()) {
1489 DC = DC->getParent()->getParent();
1490 } else break;
1491 }
1492
1493 return DC;
1494}
1495
1496/// getCurFunctionDecl - If inside of a function body, this returns a pointer
1497/// to the function decl for the function being parsed. If we're currently
1498/// in a 'block', this returns the containing context.
1499FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) const {
1500 DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
1501 return dyn_cast<FunctionDecl>(Val: DC);
1502}
1503
1504ObjCMethodDecl *Sema::getCurMethodDecl() {
1505 DeclContext *DC = getFunctionLevelDeclContext();
1506 while (isa<RecordDecl>(Val: DC))
1507 DC = DC->getParent();
1508 return dyn_cast<ObjCMethodDecl>(Val: DC);
1509}
1510
1511NamedDecl *Sema::getCurFunctionOrMethodDecl() const {
1512 DeclContext *DC = getFunctionLevelDeclContext();
1513 if (isa<ObjCMethodDecl>(Val: DC) || isa<FunctionDecl>(Val: DC))
1514 return cast<NamedDecl>(Val: DC);
1515 return nullptr;
1516}
1517
1518LangAS Sema::getDefaultCXXMethodAddrSpace() const {
1519 if (getLangOpts().OpenCL)
1520 return getASTContext().getDefaultOpenCLPointeeAddrSpace();
1521 return LangAS::Default;
1522}
1523
1524void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
1525 // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
1526 // and yet we also use the current diag ID on the DiagnosticsEngine. This has
1527 // been made more painfully obvious by the refactor that introduced this
1528 // function, but it is possible that the incoming argument can be
1529 // eliminated. If it truly cannot be (for example, there is some reentrancy
1530 // issue I am not seeing yet), then there should at least be a clarifying
1531 // comment somewhere.
1532 if (std::optional<TemplateDeductionInfo *> Info = isSFINAEContext()) {
1533 switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
1534 DiagID: Diags.getCurrentDiagID())) {
1535 case DiagnosticIDs::SFINAE_Report:
1536 // We'll report the diagnostic below.
1537 break;
1538
1539 case DiagnosticIDs::SFINAE_SubstitutionFailure:
1540 // Count this failure so that we know that template argument deduction
1541 // has failed.
1542 ++NumSFINAEErrors;
1543
1544 // Make a copy of this suppressed diagnostic and store it with the
1545 // template-deduction information.
1546 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1547 Diagnostic DiagInfo(&Diags);
1548 (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
1549 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1550 }
1551
1552 Diags.setLastDiagnosticIgnored(true);
1553 Diags.Clear();
1554 return;
1555
1556 case DiagnosticIDs::SFINAE_AccessControl: {
1557 // Per C++ Core Issue 1170, access control is part of SFINAE.
1558 // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
1559 // make access control a part of SFINAE for the purposes of checking
1560 // type traits.
1561 if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11)
1562 break;
1563
1564 SourceLocation Loc = Diags.getCurrentDiagLoc();
1565
1566 // Suppress this diagnostic.
1567 ++NumSFINAEErrors;
1568
1569 // Make a copy of this suppressed diagnostic and store it with the
1570 // template-deduction information.
1571 if (*Info && !(*Info)->hasSFINAEDiagnostic()) {
1572 Diagnostic DiagInfo(&Diags);
1573 (*Info)->addSFINAEDiagnostic(Loc: DiagInfo.getLocation(),
1574 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1575 }
1576
1577 Diags.setLastDiagnosticIgnored(true);
1578 Diags.Clear();
1579
1580 // Now the diagnostic state is clear, produce a C++98 compatibility
1581 // warning.
1582 Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
1583
1584 // The last diagnostic which Sema produced was ignored. Suppress any
1585 // notes attached to it.
1586 Diags.setLastDiagnosticIgnored(true);
1587 return;
1588 }
1589
1590 case DiagnosticIDs::SFINAE_Suppress:
1591 // Make a copy of this suppressed diagnostic and store it with the
1592 // template-deduction information;
1593 if (*Info) {
1594 Diagnostic DiagInfo(&Diags);
1595 (*Info)->addSuppressedDiagnostic(Loc: DiagInfo.getLocation(),
1596 PD: PartialDiagnostic(DiagInfo, Context.getDiagAllocator()));
1597 }
1598
1599 // Suppress this diagnostic.
1600 Diags.setLastDiagnosticIgnored(true);
1601 Diags.Clear();
1602 return;
1603 }
1604 }
1605
1606 // Copy the diagnostic printing policy over the ASTContext printing policy.
1607 // TODO: Stop doing that. See: https://reviews.llvm.org/D45093#1090292
1608 Context.setPrintingPolicy(getPrintingPolicy());
1609
1610 // Emit the diagnostic.
1611 if (!Diags.EmitCurrentDiagnostic())
1612 return;
1613
1614 // If this is not a note, and we're in a template instantiation
1615 // that is different from the last template instantiation where
1616 // we emitted an error, print a template instantiation
1617 // backtrace.
1618 if (!DiagnosticIDs::isBuiltinNote(DiagID))
1619 PrintContextStack();
1620}
1621
1622bool Sema::hasUncompilableErrorOccurred() const {
1623 if (getDiagnostics().hasUncompilableErrorOccurred())
1624 return true;
1625 auto *FD = dyn_cast<FunctionDecl>(Val: CurContext);
1626 if (!FD)
1627 return false;
1628 auto Loc = DeviceDeferredDiags.find(Val: FD);
1629 if (Loc == DeviceDeferredDiags.end())
1630 return false;
1631 for (auto PDAt : Loc->second) {
1632 if (DiagnosticIDs::isDefaultMappingAsError(DiagID: PDAt.second.getDiagID()))
1633 return true;
1634 }
1635 return false;
1636}
1637
1638// Print notes showing how we can reach FD starting from an a priori
1639// known-callable function.
1640static void emitCallStackNotes(Sema &S, const FunctionDecl *FD) {
1641 auto FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FD);
1642 while (FnIt != S.CUDA().DeviceKnownEmittedFns.end()) {
1643 // Respect error limit.
1644 if (S.Diags.hasFatalErrorOccurred())
1645 return;
1646 DiagnosticBuilder Builder(
1647 S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
1648 Builder << FnIt->second.FD;
1649 FnIt = S.CUDA().DeviceKnownEmittedFns.find(Val: FnIt->second.FD);
1650 }
1651}
1652
namespace {

/// Helper class that emits deferred diagnostic messages if an entity directly
/// or indirectly using the function that causes the deferred diagnostic
/// messages is known to be emitted.
///
/// During parsing of AST, certain diagnostic messages are recorded as deferred
/// diagnostics since it is unknown whether the functions containing such
/// diagnostics will be emitted. A list of potentially emitted functions and
/// variables that may potentially trigger emission of functions are also
/// recorded. DeferredDiagnosticsEmitter recursively visits used functions
/// by each function to emit deferred diagnostics.
///
/// During the visit, certain OpenMP directives or initializer of variables
/// with certain OpenMP attributes will cause subsequent visiting of any
/// functions enter a state which is called OpenMP device context in this
/// implementation. The state is exited when the directive or initializer is
/// exited. This state can change the emission states of subsequent uses
/// of functions.
///
/// Conceptually the functions or variables to be visited form a use graph
/// where the parent node uses the child node. At any point of the visit,
/// the tree nodes traversed from the tree root to the current node form a use
/// stack. The emission state of the current node depends on two factors:
/// 1. the emission state of the root node
/// 2. whether the current node is in OpenMP device context
/// If the function is decided to be emitted, its contained deferred diagnostics
/// are emitted, together with the information about the use stack.
///
class DeferredDiagnosticsEmitter
    : public UsedDeclVisitor<DeferredDiagnosticsEmitter> {
public:
  typedef UsedDeclVisitor<DeferredDiagnosticsEmitter> Inherited;

  // Whether the function is already in the current use-path.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> InUsePath;

  // The current use-path.
  llvm::SmallVector<CanonicalDeclPtr<FunctionDecl>, 4> UsePath;

  // Whether the visiting of the function has been done. Done[0] is for the
  // case not in OpenMP device context. Done[1] is for the case in OpenMP
  // device context. We need two sets because diagnostics emission may be
  // different depending on whether it is in OpenMP device context.
  llvm::SmallPtrSet<CanonicalDeclPtr<Decl>, 4> DoneMap[2];

  // Emission state of the root node of the current use graph.
  bool ShouldEmitRootNode;

  // Current OpenMP device context level. It is initialized to 0 and each
  // entering of device context increases it by 1 and each exit decreases
  // it by 1. Non-zero value indicates it is currently in device context.
  unsigned InOMPDeviceContext;

  DeferredDiagnosticsEmitter(Sema &S)
      : Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}

  // Do not traverse into discarded statements; their contents are not
  // emitted.
  bool shouldVisitDiscardedStmt() const { return false; }

  // An OpenMP 'target' directive puts the traversal into device context for
  // the duration of its associated statement.
  void VisitOMPTargetDirective(OMPTargetDirective *Node) {
    ++InOMPDeviceContext;
    Inherited::VisitOMPTargetDirective(Node);
    --InOMPDeviceContext;
  }

  // Dispatch for a used declaration: functions go through checkFunc;
  // variables are skipped here (file-scope variables are entered via
  // checkRecordedDecl/checkVar instead); anything else falls back to the
  // generic visitor.
  void visitUsedDecl(SourceLocation Loc, Decl *D) {
    if (isa<VarDecl>(Val: D))
      return;
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D))
      checkFunc(Loc, FD);
    else
      Inherited::visitUsedDecl(Loc, D);
  }

  // Visit the initializer of a file-scope variable. If the variable is an
  // OpenMP declare-target device variable (DT_NoHost or DT_Any), the
  // initializer is visited in device context.
  void checkVar(VarDecl *VD) {
    assert(VD->isFileVarDecl() &&
           "Should only check file-scope variables");
    if (auto *Init = VD->getInit()) {
      auto DevTy = OMPDeclareTargetDeclAttr::getDeviceType(VD);
      bool IsDev = DevTy && (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
                             *DevTy == OMPDeclareTargetDeclAttr::DT_Any);
      if (IsDev)
        ++InOMPDeviceContext;
      this->Visit(Init);
      if (IsDev)
        --InOMPDeviceContext;
    }
  }

  // Visit a used function: emit its deferred diagnostics when appropriate,
  // then recurse into its body while maintaining the use-path stack.
  void checkFunc(SourceLocation Loc, FunctionDecl *FD) {
    auto &Done = DoneMap[InOMPDeviceContext > 0 ? 1 : 0];
    FunctionDecl *Caller = UsePath.empty() ? nullptr : UsePath.back();
    // Skip non-emitted roots (unless OpenMP still needs the traversal),
    // functions exempt from host/device checking, and recursion cycles.
    if ((!ShouldEmitRootNode && !S.getLangOpts().OpenMP && !Caller) ||
        S.shouldIgnoreInHostDeviceCheck(Callee: FD) || InUsePath.count(FD))
      return;
    // Finalize analysis of OpenMP-specific constructs.
    if (Caller && S.LangOpts.OpenMP && UsePath.size() == 1 &&
        (ShouldEmitRootNode || InOMPDeviceContext))
      S.OpenMP().finalizeOpenMPDelayedAnalysis(Caller, Callee: FD, Loc);
    // Record the caller edge so emitCallStackNotes can later reconstruct
    // how FD is reached.
    if (Caller)
      S.CUDA().DeviceKnownEmittedFns[FD] = {.FD: Caller, .Loc: Loc};
    // Always emit deferred diagnostics for the direct users. This does not
    // lead to explosion of diagnostics since each user is visited at most
    // twice.
    if (ShouldEmitRootNode || InOMPDeviceContext)
      emitDeferredDiags(FD, ShowCallStack: Caller);
    // Do not revisit a function if the function body has been completely
    // visited before.
    if (!Done.insert(FD).second)
      return;
    InUsePath.insert(FD);
    UsePath.push_back(Elt: FD);
    // Note: this local 'S' shadows the inherited Sema reference member 'S'
    // within the if-body.
    if (auto *S = FD->getBody()) {
      this->Visit(S);
    }
    UsePath.pop_back();
    InUsePath.erase(FD);
  }

  // Entry point for a recorded declaration: establish the root emission
  // state for functions, or visit a variable's initializer.
  void checkRecordedDecl(Decl *D) {
    if (auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
      ShouldEmitRootNode = S.getEmissionStatus(Decl: FD, /*Final=*/true) ==
                           Sema::FunctionEmissionStatus::Emitted;
      checkFunc(Loc: SourceLocation(), FD);
    } else
      checkVar(VD: cast<VarDecl>(Val: D));
  }

  // Emit any deferred diagnostics for FD
  void emitDeferredDiags(FunctionDecl *FD, bool ShowCallStack) {
    auto It = S.DeviceDeferredDiags.find(Val: FD);
    if (It == S.DeviceDeferredDiags.end())
      return;
    bool HasWarningOrError = false;
    bool FirstDiag = true;
    for (PartialDiagnosticAt &PDAt : It->second) {
      // Respect error limit.
      if (S.Diags.hasFatalErrorOccurred())
        return;
      const SourceLocation &Loc = PDAt.first;
      const PartialDiagnostic &PD = PDAt.second;
      HasWarningOrError |=
          S.getDiagnostics().getDiagnosticLevel(DiagID: PD.getDiagID(), Loc) >=
          DiagnosticsEngine::Warning;
      {
        // Scoped so the builder flushes before the call-stack notes below.
        DiagnosticBuilder Builder(S.Diags.Report(Loc, DiagID: PD.getDiagID()));
        PD.Emit(DB: Builder);
      }
      // Emit the note on the first diagnostic in case too many diagnostics
      // cause the note not emitted.
      if (FirstDiag && HasWarningOrError && ShowCallStack) {
        emitCallStackNotes(S, FD);
        FirstDiag = false;
      }
    }
  }
};
} // namespace
1811
1812void Sema::emitDeferredDiags() {
1813 if (ExternalSource)
1814 ExternalSource->ReadDeclsToCheckForDeferredDiags(
1815 Decls&: DeclsToCheckForDeferredDiags);
1816
1817 if ((DeviceDeferredDiags.empty() && !LangOpts.OpenMP) ||
1818 DeclsToCheckForDeferredDiags.empty())
1819 return;
1820
1821 DeferredDiagnosticsEmitter DDE(*this);
1822 for (auto *D : DeclsToCheckForDeferredDiags)
1823 DDE.checkRecordedDecl(D);
1824}
1825
1826// In CUDA, there are some constructs which may appear in semantically-valid
1827// code, but trigger errors if we ever generate code for the function in which
1828// they appear. Essentially every construct you're not allowed to use on the
1829// device falls into this category, because you are allowed to use these
1830// constructs in a __host__ __device__ function, but only if that function is
1831// never codegen'ed on the device.
1832//
1833// To handle semantic checking for these constructs, we keep track of the set of
1834// functions we know will be emitted, either because we could tell a priori that
1835// they would be emitted, or because they were transitively called by a
1836// known-emitted function.
1837//
1838// We also keep a partial call graph of which not-known-emitted functions call
1839// which other not-known-emitted functions.
1840//
1841// When we see something which is illegal if the current function is emitted
1842// (usually by way of DiagIfDeviceCode, DiagIfHostCode, or
1843// CheckCall), we first check if the current function is known-emitted. If
1844// so, we immediately output the diagnostic.
1845//
1846// Otherwise, we "defer" the diagnostic. It sits in Sema::DeviceDeferredDiags
1847// until we discover that the function is known-emitted, at which point we take
1848// it out of this map and emit the diagnostic.
1849
1850Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(Kind K, SourceLocation Loc,
1851 unsigned DiagID,
1852 const FunctionDecl *Fn,
1853 Sema &S)
1854 : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
1855 ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
1856 switch (K) {
1857 case K_Nop:
1858 break;
1859 case K_Immediate:
1860 case K_ImmediateWithCallStack:
1861 ImmediateDiag.emplace(
1862 args: ImmediateDiagBuilder(S.Diags.Report(Loc, DiagID), S, DiagID));
1863 break;
1864 case K_Deferred:
1865 assert(Fn && "Must have a function to attach the deferred diag to.");
1866 auto &Diags = S.DeviceDeferredDiags[Fn];
1867 PartialDiagId.emplace(args: Diags.size());
1868 Diags.emplace_back(args&: Loc, args: S.PDiag(DiagID));
1869 break;
1870 }
1871}
1872
1873Sema::SemaDiagnosticBuilder::SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D)
1874 : S(D.S), Loc(D.Loc), DiagID(D.DiagID), Fn(D.Fn),
1875 ShowCallStack(D.ShowCallStack), ImmediateDiag(D.ImmediateDiag),
1876 PartialDiagId(D.PartialDiagId) {
1877 // Clean the previous diagnostics.
1878 D.ShowCallStack = false;
1879 D.ImmediateDiag.reset();
1880 D.PartialDiagId.reset();
1881}
1882
1883Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
1884 if (ImmediateDiag) {
1885 // Emit our diagnostic and, if it was a warning or error, output a callstack
1886 // if Fn isn't a priori known-emitted.
1887 bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
1888 DiagID, Loc) >= DiagnosticsEngine::Warning;
1889 ImmediateDiag.reset(); // Emit the immediate diag.
1890 if (IsWarningOrError && ShowCallStack)
1891 emitCallStackNotes(S, FD: Fn);
1892 } else {
1893 assert((!PartialDiagId || ShowCallStack) &&
1894 "Must always show call stack for deferred diags.");
1895 }
1896}
1897
1898Sema::SemaDiagnosticBuilder
1899Sema::targetDiag(SourceLocation Loc, unsigned DiagID, const FunctionDecl *FD) {
1900 FD = FD ? FD : getCurFunctionDecl();
1901 if (LangOpts.OpenMP)
1902 return LangOpts.OpenMPIsTargetDevice
1903 ? OpenMP().diagIfOpenMPDeviceCode(Loc, DiagID, FD)
1904 : OpenMP().diagIfOpenMPHostCode(Loc, DiagID, FD);
1905 if (getLangOpts().CUDA)
1906 return getLangOpts().CUDAIsDevice ? CUDA().DiagIfDeviceCode(Loc, DiagID)
1907 : CUDA().DiagIfHostCode(Loc, DiagID);
1908
1909 if (getLangOpts().SYCLIsDevice)
1910 return SYCL().DiagIfDeviceCode(Loc, DiagID);
1911
1912 return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
1913 FD, *this);
1914}
1915
/// Check whether \p Ty is supported by the current target and diagnose uses
/// of unsupported types. \p Loc is the location the diagnostic is attached
/// to; \p D, if non-null, names the declaration being checked (used in the
/// diagnostic text and marked invalid on error).
void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
  // Unevaluated operands are never code-generated, so nothing to check.
  if (isUnevaluatedContext() || Ty.isNull())
    return;

  // The original idea behind checkTypeSupport function is that unused
  // declarations can be replaced with an array of bytes of the same size during
  // codegen, such replacement doesn't seem to be possible for types without
  // constant byte size like zero length arrays. So, do a deep check for SYCL.
  if (D && LangOpts.SYCLIsDevice) {
    llvm::DenseSet<QualType> Visited;
    SYCL().deepTypeCheckForDevice(UsedAt: Loc, Visited, DeclToCheck: D);
  }

  Decl *C = cast<Decl>(Val: getCurLexicalContext());

  // Memcpy operations for structs containing a member with unsupported type
  // are ok, though.
  if (const auto *MD = dyn_cast<CXXMethodDecl>(Val: C)) {
    if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
        MD->isTrivial())
      return;

    if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(Val: MD))
      if (Ctor->isCopyOrMoveConstructor() && Ctor->isTrivial())
        return;
  }

  // Try to associate errors with the lexical context, if that is a function, or
  // the value declaration otherwise.
  const FunctionDecl *FD = isa<FunctionDecl>(Val: C)
                               ? cast<FunctionDecl>(Val: C)
                               : dyn_cast_or_null<FunctionDecl>(Val: D);

  // Device-only checks: types that require target support when compiling
  // for an offload device (SYCL, OpenMP target, CUDA device).
  auto CheckDeviceType = [&](QualType Ty) {
    if (Ty->isDependentType())
      return;

    if (Ty->isBitIntType()) {
      if (!Context.getTargetInfo().hasBitIntType()) {
        PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
        if (D)
          PD << D;
        else
          PD << "expression";
        targetDiag(Loc, PD, FD)
            << false /*show bit size*/ << 0 /*bitsize*/ << false /*return*/
            << Ty << Context.getTargetInfo().getTriple().str();
      }
      return;
    }

    // Check if we are dealing with two 'long double' but with different
    // semantics.
    bool LongDoubleMismatched = false;
    if (Ty->isRealFloatingType() && Context.getTypeSize(T: Ty) == 128) {
      const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(T: Ty);
      if ((&Sem != &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasFloat128Type()) ||
          (&Sem == &llvm::APFloat::PPCDoubleDouble() &&
           !Context.getTargetInfo().hasIbm128Type()))
        LongDoubleMismatched = true;
    }

    if ((Ty->isFloat16Type() && !Context.getTargetInfo().hasFloat16Type()) ||
        (Ty->isFloat128Type() && !Context.getTargetInfo().hasFloat128Type()) ||
        (Ty->isIbm128Type() && !Context.getTargetInfo().hasIbm128Type()) ||
        (Ty->isIntegerType() && Context.getTypeSize(T: Ty) == 128 &&
         !Context.getTargetInfo().hasInt128Type()) ||
        (Ty->isBFloat16Type() && !Context.getTargetInfo().hasBFloat16Type() &&
         !LangOpts.CUDAIsDevice) ||
        LongDoubleMismatched) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (targetDiag(Loc, PD, FD)
              << true /*show bit size*/
              << static_cast<unsigned>(Context.getTypeSize(T: Ty)) << Ty
              << false /*return*/ << Context.getTargetInfo().getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }
  };

  // Host-and-device checks: 'long double', FP return values, RISC-V vector
  // types, and SVE types that require specific target features.
  auto CheckType = [&](QualType Ty, bool IsRetTy = false) {
    if (LangOpts.SYCLIsDevice ||
        (LangOpts.OpenMP && LangOpts.OpenMPIsTargetDevice) ||
        LangOpts.CUDAIsDevice)
      CheckDeviceType(Ty);

    QualType UnqualTy = Ty.getCanonicalType().getUnqualifiedType();
    const TargetInfo &TI = Context.getTargetInfo();
    if (!TI.hasLongDoubleType() && UnqualTy == Context.LongDoubleTy) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << false /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    bool IsDouble = UnqualTy == Context.DoubleTy;
    bool IsFloat = UnqualTy == Context.FloatTy;
    if (IsRetTy && !TI.hasFPReturn() && (IsDouble || IsFloat)) {
      PartialDiagnostic PD = PDiag(diag::err_target_unsupported_type);
      if (D)
        PD << D;
      else
        PD << "expression";

      if (Diag(Loc, PD, FD)
          << false /*show bit size*/ << 0 << Ty << true /*return*/
          << TI.getTriple().str()) {
        if (D)
          D->setInvalidDecl();
      }
      if (D)
        targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
    }

    if (TI.hasRISCVVTypes() && Ty->isRVVSizelessBuiltinType() && FD) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      checkRVVTypeSupport(Ty, Loc, D, CallerFeatureMap);
    }

    // Don't allow SVE types in functions without a SVE target.
    if (Ty->isSVESizelessBuiltinType() && FD && FD->hasBody()) {
      llvm::StringMap<bool> CallerFeatureMap;
      Context.getFunctionFeatureMap(FeatureMap&: CallerFeatureMap, FD);
      if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap) &&
          !Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
        // NOTE(review): this dereferences D without a null check, unlike the
        // other branches above — confirm D is always non-null when an SVE
        // type reaches this point.
        Diag(D->getLocation(), diag::err_sve_vector_in_non_sve_target) << Ty;
    }
  };

  // Check the type itself, then each parameter and the return type when Ty
  // is a function type.
  CheckType(Ty);
  if (const auto *FPTy = dyn_cast<FunctionProtoType>(Val&: Ty)) {
    for (const auto &ParamTy : FPTy->param_types())
      CheckType(ParamTy);
    CheckType(FPTy->getReturnType(), /*IsRetTy=*/true);
  }
  if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Val&: Ty))
    CheckType(FNPTy->getReturnType(), /*IsRetTy=*/true);
}
2074
2075/// Looks through the macro-expansion chain for the given
2076/// location, looking for a macro expansion with the given name.
2077/// If one is found, returns true and sets the location to that
2078/// expansion loc.
2079bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
2080 SourceLocation loc = locref;
2081 if (!loc.isMacroID()) return false;
2082
2083 // There's no good way right now to look at the intermediate
2084 // expansions, so just jump to the expansion location.
2085 loc = getSourceManager().getExpansionLoc(Loc: loc);
2086
2087 // If that's written with the name, stop here.
2088 SmallString<16> buffer;
2089 if (getPreprocessor().getSpelling(loc, buffer) == name) {
2090 locref = loc;
2091 return true;
2092 }
2093 return false;
2094}
2095
2096/// Determines the active Scope associated with the given declaration
2097/// context.
2098///
2099/// This routine maps a declaration context to the active Scope object that
2100/// represents that declaration context in the parser. It is typically used
2101/// from "scope-less" code (e.g., template instantiation, lazy creation of
2102/// declarations) that injects a name for name-lookup purposes and, therefore,
2103/// must update the Scope.
2104///
2105/// \returns The scope corresponding to the given declaraion context, or NULL
2106/// if no such scope is open.
2107Scope *Sema::getScopeForContext(DeclContext *Ctx) {
2108
2109 if (!Ctx)
2110 return nullptr;
2111
2112 Ctx = Ctx->getPrimaryContext();
2113 for (Scope *S = getCurScope(); S; S = S->getParent()) {
2114 // Ignore scopes that cannot have declarations. This is important for
2115 // out-of-line definitions of static class members.
2116 if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
2117 if (DeclContext *Entity = S->getEntity())
2118 if (Ctx == Entity->getPrimaryContext())
2119 return S;
2120 }
2121
2122 return nullptr;
2123}
2124
2125/// Enter a new function scope
2126void Sema::PushFunctionScope() {
2127 if (FunctionScopes.empty() && CachedFunctionScope) {
2128 // Use CachedFunctionScope to avoid allocating memory when possible.
2129 CachedFunctionScope->Clear();
2130 FunctionScopes.push_back(Elt: CachedFunctionScope.release());
2131 } else {
2132 FunctionScopes.push_back(Elt: new FunctionScopeInfo(getDiagnostics()));
2133 }
2134 if (LangOpts.OpenMP)
2135 OpenMP().pushOpenMPFunctionRegion();
2136}
2137
2138void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
2139 FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
2140 BlockScope, Block));
2141 CapturingFunctionScopes++;
2142}
2143
2144LambdaScopeInfo *Sema::PushLambdaScope() {
2145 LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics());
2146 FunctionScopes.push_back(LSI);
2147 CapturingFunctionScopes++;
2148 return LSI;
2149}
2150
2151void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
2152 if (LambdaScopeInfo *const LSI = getCurLambda()) {
2153 LSI->AutoTemplateParameterDepth = Depth;
2154 return;
2155 }
2156 llvm_unreachable(
2157 "Remove assertion if intentionally called in a non-lambda context.");
2158}
2159
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
// This also performs initialization of block variables when they are moved
// to the heap. It uses the same rules as applicable for implicit moves
// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
  QualType T = VD->getType();
  // The synthesized initialization below must be treated as potentially
  // evaluated, since it will be code-generated.
  EnterExpressionEvaluationContext scope(
      S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
  SourceLocation Loc = VD->getLocation();
  // Build a reference to the variable to act as the initializer source.
  Expr *VarRef =
      new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
  ExprResult Result;
  auto IE = InitializedEntity::InitializeBlock(BlockVarLoc: Loc, Type: T);
  if (S.getLangOpts().CPlusPlus23) {
    // In C++23, cast the variable reference to an xvalue so initialization
    // can select a move constructor, mirroring the implicit-move rules.
    auto *E = ImplicitCastExpr::Create(Context: S.Context, T, Kind: CK_NoOp, Operand: VarRef, BasePath: nullptr,
                                       Cat: VK_XValue, FPO: FPOptionsOverride());
    Result = S.PerformCopyInitialization(Entity: IE, EqualLoc: SourceLocation(), Init: E);
  } else {
    // Pre-C++23: use the named-return-value machinery to get move-or-copy
    // initialization with VD as the move-eligible candidate.
    Result = S.PerformMoveOrCopyInitialization(
        Entity: IE, NRInfo: Sema::NamedReturnInfo{.Candidate: VD, .S: Sema::NamedReturnInfo::MoveEligible},
        Value: VarRef);
  }

  // On success, wrap any needed cleanups and record the copy-initializer on
  // the ASTContext alongside whether it can throw.
  if (!Result.isInvalid()) {
    Result = S.MaybeCreateExprWithCleanups(SubExpr: Result);
    Expr *Init = Result.getAs<Expr>();
    S.Context.setBlockVarCopyInit(VD, CopyExpr: Init, CanThrow: S.canThrow(Init));
  }

  // The destructor's exception specification is needed when IRGen generates
  // block copy/destroy functions. Resolve it here.
  if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
    if (CXXDestructorDecl *DD = RD->getDestructor()) {
      auto *FPT = DD->getType()->castAs<FunctionProtoType>();
      S.ResolveExceptionSpec(Loc, FPT: FPT);
    }
}
2198
2199static void markEscapingByrefs(const FunctionScopeInfo &FSI, Sema &S) {
2200 // Set the EscapingByref flag of __block variables captured by
2201 // escaping blocks.
2202 for (const BlockDecl *BD : FSI.Blocks) {
2203 for (const BlockDecl::Capture &BC : BD->captures()) {
2204 VarDecl *VD = BC.getVariable();
2205 if (VD->hasAttr<BlocksAttr>()) {
2206 // Nothing to do if this is a __block variable captured by a
2207 // non-escaping block.
2208 if (BD->doesNotEscape())
2209 continue;
2210 VD->setEscapingByref();
2211 }
2212 // Check whether the captured variable is or contains an object of
2213 // non-trivial C union type.
2214 QualType CapType = BC.getVariable()->getType();
2215 if (CapType.hasNonTrivialToPrimitiveDestructCUnion() ||
2216 CapType.hasNonTrivialToPrimitiveCopyCUnion())
2217 S.checkNonTrivialCUnion(QT: BC.getVariable()->getType(),
2218 Loc: BD->getCaretLocation(),
2219 UseContext: Sema::NTCUC_BlockCapture,
2220 NonTrivialKind: Sema::NTCUK_Destruct|Sema::NTCUK_Copy);
2221 }
2222 }
2223
2224 for (VarDecl *VD : FSI.ByrefBlockVars) {
2225 // __block variables might require us to capture a copy-initializer.
2226 if (!VD->isEscapingByref())
2227 continue;
2228 // It's currently invalid to ever have a __block variable with an
2229 // array type; should we diagnose that here?
2230 // Regardless, we don't want to ignore array nesting when
2231 // constructing this copy.
2232 if (VD->getType()->isStructureOrClassType())
2233 checkEscapingByref(VD, S);
2234 }
2235}
2236
/// Pop a function (or block or lambda or captured region) scope from the stack.
///
/// \param WP The warning policy to use for CFG-based warnings, or null if such
/// warnings should not be produced.
/// \param D The declaration corresponding to this function scope, if producing
/// CFG-based warnings.
/// \param BlockType The type of the block expression, if D is a BlockDecl.
Sema::PoppedFunctionScopePtr
Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
                           const Decl *D, QualType BlockType) {
  assert(!FunctionScopes.empty() && "mismatched push/pop!");

  // Flag escaping __block variables before the scope is popped, while its
  // Blocks/ByrefBlockVars lists are still current.
  markEscapingByrefs(FSI: *FunctionScopes.back(), S&: *this);

  // Ownership transfers to the returned pointer; the deleter either caches
  // or frees the scope object.
  PoppedFunctionScopePtr Scope(FunctionScopes.pop_back_val(),
                               PoppedFunctionScopeDeleter(this));

  if (LangOpts.OpenMP)
    OpenMP().popOpenMPFunctionRegion(OldFSI: Scope.get());

  // Issue any analysis-based warnings.
  if (WP && D)
    AnalysisWarnings.IssueWarnings(P: *WP, fscope: Scope.get(), D, BlockType);
  else
    // Without a policy/declaration we cannot run the CFG analyses; emit the
    // possibly-unreachable diagnostics unconditionally instead.
    for (const auto &PUD : Scope->PossiblyUnreachableDiags)
      Diag(PUD.Loc, PUD.PD);

  return Scope;
}
2266
2267void Sema::PoppedFunctionScopeDeleter::
2268operator()(sema::FunctionScopeInfo *Scope) const {
2269 if (!Scope->isPlainFunction())
2270 Self->CapturingFunctionScopes--;
2271 // Stash the function scope for later reuse if it's for a normal function.
2272 if (Scope->isPlainFunction() && !Self->CachedFunctionScope)
2273 Self->CachedFunctionScope.reset(p: Scope);
2274 else
2275 delete Scope;
2276}
2277
2278void Sema::PushCompoundScope(bool IsStmtExpr) {
2279 getCurFunction()->CompoundScopes.push_back(
2280 Elt: CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
2281}
2282
2283void Sema::PopCompoundScope() {
2284 FunctionScopeInfo *CurFunction = getCurFunction();
2285 assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
2286
2287 CurFunction->CompoundScopes.pop_back();
2288}
2289
2290/// Determine whether any errors occurred within this function/method/
2291/// block.
2292bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
2293 return getCurFunction()->hasUnrecoverableErrorOccurred();
2294}
2295
2296void Sema::setFunctionHasBranchIntoScope() {
2297 if (!FunctionScopes.empty())
2298 FunctionScopes.back()->setHasBranchIntoScope();
2299}
2300
2301void Sema::setFunctionHasBranchProtectedScope() {
2302 if (!FunctionScopes.empty())
2303 FunctionScopes.back()->setHasBranchProtectedScope();
2304}
2305
2306void Sema::setFunctionHasIndirectGoto() {
2307 if (!FunctionScopes.empty())
2308 FunctionScopes.back()->setHasIndirectGoto();
2309}
2310
2311void Sema::setFunctionHasMustTail() {
2312 if (!FunctionScopes.empty())
2313 FunctionScopes.back()->setHasMustTail();
2314}
2315
2316BlockScopeInfo *Sema::getCurBlock() {
2317 if (FunctionScopes.empty())
2318 return nullptr;
2319
2320 auto CurBSI = dyn_cast<BlockScopeInfo>(Val: FunctionScopes.back());
2321 if (CurBSI && CurBSI->TheDecl &&
2322 !CurBSI->TheDecl->Encloses(CurContext)) {
2323 // We have switched contexts due to template instantiation.
2324 assert(!CodeSynthesisContexts.empty());
2325 return nullptr;
2326 }
2327
2328 return CurBSI;
2329}
2330
2331FunctionScopeInfo *Sema::getEnclosingFunction() const {
2332 if (FunctionScopes.empty())
2333 return nullptr;
2334
2335 for (int e = FunctionScopes.size() - 1; e >= 0; --e) {
2336 if (isa<sema::BlockScopeInfo>(Val: FunctionScopes[e]))
2337 continue;
2338 return FunctionScopes[e];
2339 }
2340 return nullptr;
2341}
2342
2343LambdaScopeInfo *Sema::getEnclosingLambda() const {
2344 for (auto *Scope : llvm::reverse(C: FunctionScopes)) {
2345 if (auto *LSI = dyn_cast<sema::LambdaScopeInfo>(Val: Scope)) {
2346 if (LSI->Lambda && !LSI->Lambda->Encloses(CurContext) &&
2347 LSI->AfterParameterList) {
2348 // We have switched contexts due to template instantiation.
2349 // FIXME: We should swap out the FunctionScopes during code synthesis
2350 // so that we don't need to check for this.
2351 assert(!CodeSynthesisContexts.empty());
2352 return nullptr;
2353 }
2354 return LSI;
2355 }
2356 }
2357 return nullptr;
2358}
2359
2360LambdaScopeInfo *Sema::getCurLambda(bool IgnoreNonLambdaCapturingScope) {
2361 if (FunctionScopes.empty())
2362 return nullptr;
2363
2364 auto I = FunctionScopes.rbegin();
2365 if (IgnoreNonLambdaCapturingScope) {
2366 auto E = FunctionScopes.rend();
2367 while (I != E && isa<CapturingScopeInfo>(Val: *I) && !isa<LambdaScopeInfo>(Val: *I))
2368 ++I;
2369 if (I == E)
2370 return nullptr;
2371 }
2372 auto *CurLSI = dyn_cast<LambdaScopeInfo>(Val: *I);
2373 if (CurLSI && CurLSI->Lambda && CurLSI->CallOperator &&
2374 !CurLSI->Lambda->Encloses(CurContext) && CurLSI->AfterParameterList) {
2375 // We have switched contexts due to template instantiation.
2376 assert(!CodeSynthesisContexts.empty());
2377 return nullptr;
2378 }
2379
2380 return CurLSI;
2381}
2382
2383// We have a generic lambda if we parsed auto parameters, or we have
2384// an associated template parameter list.
2385LambdaScopeInfo *Sema::getCurGenericLambda() {
2386 if (LambdaScopeInfo *LSI = getCurLambda()) {
2387 return (LSI->TemplateParams.size() ||
2388 LSI->GLTemplateParameterList) ? LSI : nullptr;
2389 }
2390 return nullptr;
2391}
2392
2393
/// Record a raw comment in the ASTContext and warn about "almost Doxygen"
/// trailing comments (e.g. '//<' instead of '///<') or comments broken by a
/// line splice, offering a fix-it where possible.
void Sema::ActOnComment(SourceRange Comment) {
  // System-header comments are skipped unless explicitly retained.
  if (!LangOpts.RetainCommentsFromSystemHeaders &&
      SourceMgr.isInSystemHeader(Loc: Comment.getBegin()))
    return;
  RawComment RC(SourceMgr, Comment, LangOpts.CommentOpts, false);
  if (RC.isAlmostTrailingComment() || RC.hasUnsupportedSplice(SourceMgr)) {
    // The first four characters hold the (near-)Doxygen marker to replace.
    SourceRange MagicMarkerRange(Comment.getBegin(),
                                 Comment.getBegin().getLocWithOffset(Offset: 3));
    StringRef MagicMarkerText;
    switch (RC.getKind()) {
    case RawComment::RCK_OrdinaryBCPL:
      MagicMarkerText = "///<";
      break;
    case RawComment::RCK_OrdinaryC:
      MagicMarkerText = "/**<";
      break;
    case RawComment::RCK_Invalid:
      // FIXME: are there other scenarios that could produce an invalid
      // raw comment here?
      Diag(Comment.getBegin(), diag::warn_splice_in_doxygen_comment);
      return;
    default:
      llvm_unreachable("if this is an almost Doxygen comment, "
                       "it should be ordinary");
    }
    Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) <<
      FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText);
  }
  Context.addComment(RC);
}
2424
// Pin this vtable to this file.
ExternalSemaSource::~ExternalSemaSource() {}
// Anchor for isa<>/RTTI-style identification of this source kind.
char ExternalSemaSource::ID;

// Default no-op implementations below; concrete external sources override
// the ones they support.
void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
void ExternalSemaSource::updateOutOfDateSelector(Selector Sel) { }

void ExternalSemaSource::ReadKnownNamespaces(
    SmallVectorImpl<NamespaceDecl *> &Namespaces) {
}

void ExternalSemaSource::ReadUndefinedButUsed(
    llvm::MapVector<NamedDecl *, SourceLocation> &Undefined) {}

void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector<
    FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {}
2441
/// Figure out if an expression could be turned into a call.
///
/// Use this when trying to recover from an error where the programmer may have
/// written just the name of a function instead of actually calling it.
///
/// \param E - The expression to examine.
/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
/// with no arguments, this parameter is set to the type returned by such a
/// call; otherwise, it is set to an empty QualType.
/// \param OverloadSet - If the expression is an overloaded function
/// name, this parameter is populated with the decls of the various overloads.
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                         UnresolvedSetImpl &OverloadSet) {
  ZeroArgCallReturnTy = QualType();
  OverloadSet.clear();

  // Collect the candidate declarations if E names an overload set, either a
  // plain overloaded name or a bound member reference.
  const OverloadExpr *Overloads = nullptr;
  bool IsMemExpr = false;
  if (E.getType() == Context.OverloadTy) {
    OverloadExpr::FindResult FR = OverloadExpr::find(E: const_cast<Expr*>(&E));

    // Ignore overloads that are pointer-to-member constants.
    if (FR.HasFormOfMemberPointer)
      return false;

    Overloads = FR.Expression;
  } else if (E.getType() == Context.BoundMemberTy) {
    Overloads = dyn_cast<UnresolvedMemberExpr>(Val: E.IgnoreParens());
    IsMemExpr = true;
  }

  bool Ambiguous = false;
  bool IsMV = false;

  if (Overloads) {
    for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
         DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
      OverloadSet.addDecl(D: *it);

      // Check whether the function is a non-template, non-member which takes no
      // arguments.
      if (IsMemExpr)
        continue;
      if (const FunctionDecl *OverloadDecl
            = dyn_cast<FunctionDecl>(Val: (*it)->getUnderlyingDecl())) {
        if (OverloadDecl->getMinRequiredArguments() == 0) {
          // A second zero-argument candidate makes the return type ambiguous,
          // unless the candidates seen so far are cpu_dispatch/cpu_specific
          // multiversion variants (which legitimately share a name).
          if (!ZeroArgCallReturnTy.isNull() && !Ambiguous &&
              (!IsMV || !(OverloadDecl->isCPUDispatchMultiVersion() ||
                         OverloadDecl->isCPUSpecificMultiVersion()))) {
            ZeroArgCallReturnTy = QualType();
            Ambiguous = true;
          } else {
            ZeroArgCallReturnTy = OverloadDecl->getReturnType();
            IsMV = OverloadDecl->isCPUDispatchMultiVersion() ||
                   OverloadDecl->isCPUSpecificMultiVersion();
          }
        }
      }
    }

    // If it's not a member, use better machinery to try to resolve the call
    if (!IsMemExpr)
      return !ZeroArgCallReturnTy.isNull();
  }

  // Attempt to call the member with no arguments - this will correctly handle
  // member templates with defaults/deduction of template arguments, overloads
  // with default arguments, etc.
  if (IsMemExpr && !E.isTypeDependent()) {
    // Build the call speculatively, inside a tentative analysis scope.
    Sema::TentativeAnalysisScope Trap(*this);
    ExprResult R = BuildCallToMemberFunction(S: nullptr, MemExpr: &E, LParenLoc: SourceLocation(),
                                             Args: std::nullopt, RParenLoc: SourceLocation());
    if (R.isUsable()) {
      ZeroArgCallReturnTy = R.get()->getType();
      return true;
    }
    return false;
  }

  if (const auto *DeclRef = dyn_cast<DeclRefExpr>(Val: E.IgnoreParens())) {
    if (const auto *Fun = dyn_cast<FunctionDecl>(Val: DeclRef->getDecl())) {
      // Note: returns true even if the function requires arguments; in that
      // case ZeroArgCallReturnTy is left null.
      if (Fun->getMinRequiredArguments() == 0)
        ZeroArgCallReturnTy = Fun->getReturnType();
      return true;
    }
  }

  // We don't have an expression that's convenient to get a FunctionDecl from,
  // but we can at least check if the type is "function of 0 arguments".
  QualType ExprTy = E.getType();
  const FunctionType *FunTy = nullptr;
  QualType PointeeTy = ExprTy->getPointeeType();
  if (!PointeeTy.isNull())
    FunTy = PointeeTy->getAs<FunctionType>();
  if (!FunTy)
    FunTy = ExprTy->getAs<FunctionType>();

  if (const auto *FPT = dyn_cast_if_present<FunctionProtoType>(Val: FunTy)) {
    if (FPT->getNumParams() == 0)
      ZeroArgCallReturnTy = FunTy->getReturnType();
    return true;
  }
  return false;
}
2546
2547/// Give notes for a set of overloads.
2548///
2549/// A companion to tryExprAsCall. In cases when the name that the programmer
2550/// wrote was an overloaded function, we may be able to make some guesses about
2551/// plausible overloads based on their return types; such guesses can be handed
2552/// off to this method to be emitted as notes.
2553///
2554/// \param Overloads - The overloads to note.
2555/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
2556/// -fshow-overloads=best, this is the location to attach to the note about too
2557/// many candidates. Typically this will be the location of the original
2558/// ill-formed expression.
2559static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
2560 const SourceLocation FinalNoteLoc) {
2561 unsigned ShownOverloads = 0;
2562 unsigned SuppressedOverloads = 0;
2563 for (UnresolvedSetImpl::iterator It = Overloads.begin(),
2564 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2565 if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
2566 ++SuppressedOverloads;
2567 continue;
2568 }
2569
2570 const NamedDecl *Fn = (*It)->getUnderlyingDecl();
2571 // Don't print overloads for non-default multiversioned functions.
2572 if (const auto *FD = Fn->getAsFunction()) {
2573 if (FD->isMultiVersion() && FD->hasAttr<TargetAttr>() &&
2574 !FD->getAttr<TargetAttr>()->isDefaultVersion())
2575 continue;
2576 if (FD->isMultiVersion() && FD->hasAttr<TargetVersionAttr>() &&
2577 !FD->getAttr<TargetVersionAttr>()->isDefaultVersion())
2578 continue;
2579 }
2580 S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
2581 ++ShownOverloads;
2582 }
2583
2584 S.Diags.overloadCandidatesShown(N: ShownOverloads);
2585
2586 if (SuppressedOverloads)
2587 S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
2588 << SuppressedOverloads;
2589}
2590
2591static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
2592 const UnresolvedSetImpl &Overloads,
2593 bool (*IsPlausibleResult)(QualType)) {
2594 if (!IsPlausibleResult)
2595 return noteOverloads(S, Overloads, FinalNoteLoc: Loc);
2596
2597 UnresolvedSet<2> PlausibleOverloads;
2598 for (OverloadExpr::decls_iterator It = Overloads.begin(),
2599 DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
2600 const auto *OverloadDecl = cast<FunctionDecl>(Val: *It);
2601 QualType OverloadResultTy = OverloadDecl->getReturnType();
2602 if (IsPlausibleResult(OverloadResultTy))
2603 PlausibleOverloads.addDecl(D: It.getDecl());
2604 }
2605 noteOverloads(S, Overloads: PlausibleOverloads, FinalNoteLoc: Loc);
2606}
2607
2608/// Determine whether the given expression can be called by just
2609/// putting parentheses after it. Notably, expressions with unary
2610/// operators can't be because the unary operator will start parsing
2611/// outside the call.
2612static bool IsCallableWithAppend(const Expr *E) {
2613 E = E->IgnoreImplicit();
2614 return (!isa<CStyleCastExpr>(Val: E) &&
2615 !isa<UnaryOperator>(Val: E) &&
2616 !isa<BinaryOperator>(Val: E) &&
2617 !isa<CXXOperatorCallExpr>(Val: E));
2618}
2619
2620static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
2621 if (const auto *UO = dyn_cast<UnaryOperator>(Val: E))
2622 E = UO->getSubExpr();
2623
2624 if (const auto *ULE = dyn_cast<UnresolvedLookupExpr>(Val: E)) {
2625 if (ULE->getNumDecls() == 0)
2626 return false;
2627
2628 const NamedDecl *ND = *ULE->decls_begin();
2629 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2630 return FD->isCPUDispatchMultiVersion() || FD->isCPUSpecificMultiVersion();
2631 }
2632 return false;
2633}
2634
// Attempt to recover from an expression that should have been a call: emit
// the diagnostic PD (with a "()" fixit where safe), note plausible overloads,
// and rewrite E into a zero-argument call when that is viable.  Returns true
// if a diagnostic was emitted (and E was replaced), false if recovery was
// declined and the caller should handle E itself.
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                                bool ForceComplain,
                                bool (*IsPlausibleResult)(QualType)) {
  SourceLocation Loc = E.get()->getExprLoc();
  SourceRange Range = E.get()->getSourceRange();
  UnresolvedSet<4> Overloads;

  // If this is a SFINAE context, don't try anything that might trigger ADL
  // prematurely.
  if (!isSFINAEContext()) {
    QualType ZeroArgCallTy;
    if (tryExprAsCall(E&: *E.get(), ZeroArgCallReturnTy&: ZeroArgCallTy, OverloadSet&: Overloads) &&
        !ZeroArgCallTy.isNull() &&
        (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
      // At this point, we know E is potentially callable with 0
      // arguments and that it returns something of a reasonable type,
      // so we can emit a fixit and carry on pretending that E was
      // actually a CallExpr.
      SourceLocation ParenInsertionLoc = getLocForEndOfToken(Loc: Range.getEnd());
      bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
      // The "()" insertion is only offered where appending parentheses
      // would actually parse as a call on E.
      Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
                    << (IsCallableWithAppend(E: E.get())
                            ? FixItHint::CreateInsertion(InsertionLoc: ParenInsertionLoc,
                                                         Code: "()")
                            : FixItHint());
      if (!IsMV)
        notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);

      // FIXME: Try this before emitting the fixit, and suppress diagnostics
      // while doing so.
      E = BuildCallExpr(S: nullptr, Fn: E.get(), LParenLoc: Range.getEnd(), ArgExprs: std::nullopt,
                        RParenLoc: Range.getEnd().getLocWithOffset(Offset: 1));
      return true;
    }
  }
  if (!ForceComplain) return false;

  // Forced complaint: no zero-argument recovery was possible, so emit the
  // diagnostic without a fixit and poison the expression.
  bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E: E.get());
  Diag(Loc, PD) << /*not zero-arg*/ 0 << IsMV << Range;
  if (!IsMV)
    notePlausibleOverloads(S&: *this, Loc, Overloads, IsPlausibleResult);
  E = ExprError();
  return true;
}
2679
2680IdentifierInfo *Sema::getSuperIdentifier() const {
2681 if (!Ident_super)
2682 Ident_super = &Context.Idents.get(Name: "super");
2683 return Ident_super;
2684}
2685
// Push a new function scope describing a captured region (e.g. an OpenMP
// region) of kind K, backed by the given captured declaration and record.
void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD,
                                   CapturedRegionKind K,
                                   unsigned OpenMPCaptureLevel) {
  auto *CSI = new CapturedRegionScopeInfo(
      getDiagnostics(), S, CD, RD, CD->getContextParam(), K,
      // Only OpenMP captured regions record the current OpenMP nesting
      // level; everything else uses 0.
      (getLangOpts().OpenMP && K == CR_OpenMP)
          ? OpenMP().getOpenMPNestingLevel()
          : 0,
      OpenMPCaptureLevel);
  // Captured regions produce no value.
  CSI->ReturnType = Context.VoidTy;
  FunctionScopes.push_back(CSI);
  CapturingFunctionScopes++;
}
2699
2700CapturedRegionScopeInfo *Sema::getCurCapturedRegion() {
2701 if (FunctionScopes.empty())
2702 return nullptr;
2703
2704 return dyn_cast<CapturedRegionScopeInfo>(Val: FunctionScopes.back());
2705}
2706
// Accessor for the recorded field -> delete-expression-locations map
// (DeleteExprs).
const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
  return DeleteExprs;
}
2711
// Snapshot the floating-point state: Sema's current FP features, the FP
// pragma stack's current overrides, and the preprocessor's FP eval method
// and last FP pragma location, so the destructor can restore them all.
Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
    : S(S), OldFPFeaturesState(S.CurFPFeatures),
      OldOverrides(S.FpPragmaStack.CurrentValue),
      OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
      OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
2717
// Restore the floating-point state captured by the constructor.
Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
  S.CurFPFeatures = OldFPFeaturesState;
  S.FpPragmaStack.CurrentValue = OldOverrides;
  S.PP.setCurrentFPEvalMethod(PragmaLoc: OldFPPragmaLocation, Val: OldEvalMethod);
}
2723
2724bool Sema::isDeclaratorFunctionLike(Declarator &D) {
2725 assert(D.getCXXScopeSpec().isSet() &&
2726 "can only be called for qualified names");
2727
2728 auto LR = LookupResult(*this, D.getIdentifier(), D.getBeginLoc(),
2729 LookupOrdinaryName, forRedeclarationInCurContext());
2730 DeclContext *DC = computeDeclContext(SS: D.getCXXScopeSpec(),
2731 EnteringContext: !D.getDeclSpec().isFriendSpecified());
2732 if (!DC)
2733 return false;
2734
2735 LookupQualifiedName(R&: LR, LookupCtx: DC);
2736 bool Result = llvm::all_of(Range&: LR, P: [](Decl *Dcl) {
2737 if (NamedDecl *ND = dyn_cast<NamedDecl>(Val: Dcl)) {
2738 ND = ND->getUnderlyingDecl();
2739 return isa<FunctionDecl>(Val: ND) || isa<FunctionTemplateDecl>(Val: ND) ||
2740 isa<UsingDecl>(Val: ND);
2741 }
2742 return false;
2743 });
2744 return Result;
2745}
2746

// Source: clang/lib/Sema/Sema.cpp