1//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the ASTContext interface.
10//
11//===----------------------------------------------------------------------===//
12
13#include "clang/AST/ASTContext.h"
14#include "CXXABI.h"
15#include "Interp/Context.h"
16#include "clang/AST/APValue.h"
17#include "clang/AST/ASTConcept.h"
18#include "clang/AST/ASTMutationListener.h"
19#include "clang/AST/ASTTypeTraits.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/AttrIterator.h"
22#include "clang/AST/CharUnits.h"
23#include "clang/AST/Comment.h"
24#include "clang/AST/Decl.h"
25#include "clang/AST/DeclBase.h"
26#include "clang/AST/DeclCXX.h"
27#include "clang/AST/DeclContextInternals.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/DeclOpenMP.h"
30#include "clang/AST/DeclTemplate.h"
31#include "clang/AST/DeclarationName.h"
32#include "clang/AST/DependenceFlags.h"
33#include "clang/AST/Expr.h"
34#include "clang/AST/ExprCXX.h"
35#include "clang/AST/ExprConcepts.h"
36#include "clang/AST/ExternalASTSource.h"
37#include "clang/AST/Mangle.h"
38#include "clang/AST/MangleNumberingContext.h"
39#include "clang/AST/NestedNameSpecifier.h"
40#include "clang/AST/ParentMapContext.h"
41#include "clang/AST/RawCommentList.h"
42#include "clang/AST/RecordLayout.h"
43#include "clang/AST/Stmt.h"
44#include "clang/AST/TemplateBase.h"
45#include "clang/AST/TemplateName.h"
46#include "clang/AST/Type.h"
47#include "clang/AST/TypeLoc.h"
48#include "clang/AST/UnresolvedSet.h"
49#include "clang/AST/VTableBuilder.h"
50#include "clang/Basic/AddressSpaces.h"
51#include "clang/Basic/Builtins.h"
52#include "clang/Basic/CommentOptions.h"
53#include "clang/Basic/ExceptionSpecificationType.h"
54#include "clang/Basic/IdentifierTable.h"
55#include "clang/Basic/LLVM.h"
56#include "clang/Basic/LangOptions.h"
57#include "clang/Basic/Linkage.h"
58#include "clang/Basic/Module.h"
59#include "clang/Basic/NoSanitizeList.h"
60#include "clang/Basic/ObjCRuntime.h"
61#include "clang/Basic/ProfileList.h"
62#include "clang/Basic/SourceLocation.h"
63#include "clang/Basic/SourceManager.h"
64#include "clang/Basic/Specifiers.h"
65#include "clang/Basic/TargetCXXABI.h"
66#include "clang/Basic/TargetInfo.h"
67#include "clang/Basic/XRayLists.h"
68#include "llvm/ADT/APFixedPoint.h"
69#include "llvm/ADT/APInt.h"
70#include "llvm/ADT/APSInt.h"
71#include "llvm/ADT/ArrayRef.h"
72#include "llvm/ADT/DenseMap.h"
73#include "llvm/ADT/DenseSet.h"
74#include "llvm/ADT/FoldingSet.h"
75#include "llvm/ADT/PointerUnion.h"
76#include "llvm/ADT/STLExtras.h"
77#include "llvm/ADT/SmallPtrSet.h"
78#include "llvm/ADT/SmallVector.h"
79#include "llvm/ADT/StringExtras.h"
80#include "llvm/ADT/StringRef.h"
81#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
82#include "llvm/Support/Capacity.h"
83#include "llvm/Support/Casting.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/ErrorHandling.h"
86#include "llvm/Support/MD5.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/TargetParser/Triple.h"
90#include <algorithm>
91#include <cassert>
92#include <cstddef>
93#include <cstdint>
94#include <cstdlib>
95#include <map>
96#include <memory>
97#include <optional>
98#include <string>
99#include <tuple>
100#include <utility>
101
102using namespace clang;
103
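/// Relative ranks of the built-in floating-point types, ordered from lowest
/// to highest; used when comparing and converting between these types.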
104enum FloatingRank {
105 BFloat16Rank,
106 Float16Rank,
107 HalfRank,
108 FloatRank,
109 DoubleRank,
110 LongDoubleRank,
111 Float128Rank,
112 Ibm128Rank
113};
114
115/// \returns The locations that are relevant when searching for Doc comments
116/// related to \p D.
117static SmallVector<SourceLocation, 2>
118getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
119 assert(D);
120
  // Users cannot attach documentation to implicit declarations.
122 if (D->isImplicit())
123 return {};
124
  // Users cannot attach documentation to implicit instantiations.
126 if (const auto *FD = dyn_cast<FunctionDecl>(Val: D)) {
127 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
128 return {};
129 }
130
131 if (const auto *VD = dyn_cast<VarDecl>(Val: D)) {
132 if (VD->isStaticDataMember() &&
133 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
134 return {};
135 }
136
137 if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: D)) {
138 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
139 return {};
140 }
141
142 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: D)) {
143 TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
144 if (TSK == TSK_ImplicitInstantiation ||
145 TSK == TSK_Undeclared)
146 return {};
147 }
148
149 if (const auto *ED = dyn_cast<EnumDecl>(Val: D)) {
150 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
151 return {};
152 }
153 if (const auto *TD = dyn_cast<TagDecl>(Val: D)) {
    // When a tag declaration (but not its definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
156 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
157 return {};
158 }
159 // TODO: handle comments for function parameters properly.
160 if (isa<ParmVarDecl>(Val: D))
161 return {};
162
163 // TODO: we could look up template parameter documentation in the template
164 // documentation.
165 if (isa<TemplateTypeParmDecl>(Val: D) ||
166 isa<NonTypeTemplateParmDecl>(Val: D) ||
167 isa<TemplateTemplateParmDecl>(Val: D))
168 return {};
169
170 SmallVector<SourceLocation, 2> Locations;
171 // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, so we use the declaration's starting location as the
  // "declaration location".
  // For all other declarations multiple declarators are quite common, so we
  // use the location of the identifier as the "declaration location".
177 SourceLocation BaseLocation;
178 if (isa<ObjCMethodDecl>(Val: D) || isa<ObjCContainerDecl>(Val: D) ||
179 isa<ObjCPropertyDecl>(Val: D) || isa<RedeclarableTemplateDecl>(Val: D) ||
180 isa<ClassTemplateSpecializationDecl>(Val: D) ||
181 // Allow association with Y across {} in `typedef struct X {} Y`.
182 isa<TypedefDecl>(Val: D))
183 BaseLocation = D->getBeginLoc();
184 else
185 BaseLocation = D->getLocation();
186
187 if (!D->getLocation().isMacroID()) {
188 Locations.emplace_back(Args&: BaseLocation);
189 } else {
190 const auto *DeclCtx = D->getDeclContext();
191
    // For definitions generated from a macro (and not contained by another
    // declaration in the macro), look for a comment at the expansion site
    // first; if there is none, fall back to a comment inside the macro.
    // To that end, return the expansion location of BaseLocation first, and
    // as the second value the spelling location of the beginning of the
    // declaration defined inside the macro.
199 if (!(DeclCtx &&
200 Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
201 Locations.emplace_back(Args: SourceMgr.getExpansionLoc(Loc: BaseLocation));
202 }
203
    // We use Decl::getBeginLoc() rather than BaseLocation here to make sure
    // we always refer to the declaration itself and never to a macro argument
    // location at the expansion site (which can happen when the name's
    // spelling is provided via a macro argument).
208 Locations.emplace_back(Args: SourceMgr.getSpellingLoc(Loc: D->getBeginLoc()));
209 }
210
211 return Locations;
212}
213
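/// Search \p CommentsInTheFile for a documentation comment to attach to
/// \p D: either a trailing comment on the same line as
/// \p RepresentativeLocForDecl, or the closest preceding non-trailing doc
/// comment with no other declarations or preprocessor directives between it
/// and the declaration.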
214RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
215 const Decl *D, const SourceLocation RepresentativeLocForDecl,
216 const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
217 // If the declaration doesn't map directly to a location in a file, we
218 // can't find the comment.
219 if (RepresentativeLocForDecl.isInvalid() ||
220 !RepresentativeLocForDecl.isFileID())
221 return nullptr;
222
223 // If there are no comments anywhere, we won't find anything.
224 if (CommentsInTheFile.empty())
225 return nullptr;
226
227 // Decompose the location for the declaration and find the beginning of the
228 // file buffer.
229 const std::pair<FileID, unsigned> DeclLocDecomp =
230 SourceMgr.getDecomposedLoc(Loc: RepresentativeLocForDecl);
231
232 // Slow path.
233 auto OffsetCommentBehindDecl =
234 CommentsInTheFile.lower_bound(x: DeclLocDecomp.second);
235
236 // First check whether we have a trailing comment.
237 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
238 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
239 if ((CommentBehindDecl->isDocumentation() ||
240 LangOpts.CommentOpts.ParseAllComments) &&
241 CommentBehindDecl->isTrailingComment() &&
242 (isa<FieldDecl>(Val: D) || isa<EnumConstantDecl>(Val: D) || isa<VarDecl>(Val: D) ||
243 isa<ObjCMethodDecl>(Val: D) || isa<ObjCPropertyDecl>(Val: D))) {
244
      // Check that the Doxygen trailing comment comes after the declaration
      // and starts on the same line and in the same file as the declaration.
247 if (SourceMgr.getLineNumber(FID: DeclLocDecomp.first, FilePos: DeclLocDecomp.second) ==
248 Comments.getCommentBeginLine(C: CommentBehindDecl, File: DeclLocDecomp.first,
249 Offset: OffsetCommentBehindDecl->first)) {
250 return CommentBehindDecl;
251 }
252 }
253 }
254
255 // The comment just after the declaration was not a trailing comment.
256 // Let's look at the previous comment.
257 if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
258 return nullptr;
259
260 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
261 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;
262
  // Check that we actually have a non-trailing Doxygen comment.
264 if (!(CommentBeforeDecl->isDocumentation() ||
265 LangOpts.CommentOpts.ParseAllComments) ||
266 CommentBeforeDecl->isTrailingComment())
267 return nullptr;
268
269 // Decompose the end of the comment.
270 const unsigned CommentEndOffset =
271 Comments.getCommentEndOffset(C: CommentBeforeDecl);
272
273 // Get the corresponding buffer.
274 bool Invalid = false;
275 const char *Buffer = SourceMgr.getBufferData(FID: DeclLocDecomp.first,
276 Invalid: &Invalid).data();
277 if (Invalid)
278 return nullptr;
279
280 // Extract text between the comment and declaration.
281 StringRef Text(Buffer + CommentEndOffset,
282 DeclLocDecomp.second - CommentEndOffset);
283
284 // There should be no other declarations or preprocessor directives between
285 // comment and declaration.
286 if (Text.find_last_of(Chars: ";{}#@") != StringRef::npos)
287 return nullptr;
288
289 return CommentBeforeDecl;
290}
291
292RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
293 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
294
295 for (const auto DeclLoc : DeclLocs) {
296 // If the declaration doesn't map directly to a location in a file, we
297 // can't find the comment.
298 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
299 continue;
300
301 if (ExternalSource && !CommentsLoaded) {
302 ExternalSource->ReadComments();
303 CommentsLoaded = true;
304 }
305
306 if (Comments.empty())
307 continue;
308
309 const FileID File = SourceMgr.getDecomposedLoc(Loc: DeclLoc).first;
310 if (!File.isValid())
311 continue;
312
313 const auto CommentsInThisFile = Comments.getCommentsInFile(File);
314 if (!CommentsInThisFile || CommentsInThisFile->empty())
315 continue;
316
317 if (RawComment *Comment =
318 getRawCommentForDeclNoCacheImpl(D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile))
319 return Comment;
320 }
321
322 return nullptr;
323}
324
325void ASTContext::addComment(const RawComment &RC) {
326 assert(LangOpts.RetainCommentsFromSystemHeaders ||
327 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
328 Comments.addComment(RC, CommentOpts: LangOpts.CommentOpts, Allocator&: BumpAlloc);
329}
330
/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
334static const Decl &adjustDeclToTemplate(const Decl &D) {
335 if (const auto *FD = dyn_cast<FunctionDecl>(Val: &D)) {
336 // Is this function declaration part of a function template?
337 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
338 return *FTD;
339
340 // Nothing to do if function is not an implicit instantiation.
341 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
342 return D;
343
344 // Function is an implicit instantiation of a function template?
345 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
346 return *FTD;
347
348 // Function is instantiated from a member definition of a class template?
349 if (const FunctionDecl *MemberDecl =
350 FD->getInstantiatedFromMemberFunction())
351 return *MemberDecl;
352
353 return D;
354 }
355 if (const auto *VD = dyn_cast<VarDecl>(Val: &D)) {
356 // Static data member is instantiated from a member definition of a class
357 // template?
358 if (VD->isStaticDataMember())
359 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
360 return *MemberDecl;
361
362 return D;
363 }
364 if (const auto *CRD = dyn_cast<CXXRecordDecl>(Val: &D)) {
365 // Is this class declaration part of a class template?
366 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
367 return *CTD;
368
369 // Class is an implicit instantiation of a class template or partial
370 // specialization?
371 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(Val: CRD)) {
372 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
373 return D;
374 llvm::PointerUnion<ClassTemplateDecl *,
375 ClassTemplatePartialSpecializationDecl *>
376 PU = CTSD->getSpecializedTemplateOrPartial();
377 return PU.is<ClassTemplateDecl *>()
378 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
379 : *static_cast<const Decl *>(
380 PU.get<ClassTemplatePartialSpecializationDecl *>());
381 }
382
383 // Class is instantiated from a member definition of a class template?
384 if (const MemberSpecializationInfo *Info =
385 CRD->getMemberSpecializationInfo())
386 return *Info->getInstantiatedFrom();
387
388 return D;
389 }
390 if (const auto *ED = dyn_cast<EnumDecl>(Val: &D)) {
391 // Enum is instantiated from a member definition of a class template?
392 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
393 return *MemberDecl;
394
395 return D;
396 }
397 // FIXME: Adjust alias templates?
398 return D;
399}
400
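/// Return the documentation comment attached to \p D or to any of its
/// redeclarations, consulting and updating the comment caches. If
/// \p OriginalDecl is non-null, it receives the redeclaration the comment is
/// actually attached to, or null if no comment was found.
///
/// A minimal usage sketch (the surrounding variable names are illustrative):
/// \code
///   const Decl *CommentOwner = nullptr;
///   if (const RawComment *RC =
///           Ctx.getRawCommentForAnyRedecl(D, &CommentOwner))
///     llvm::errs() << RC->getRawText(Ctx.getSourceManager()) << "\n";
/// \endcode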
401const RawComment *ASTContext::getRawCommentForAnyRedecl(
402 const Decl *D,
403 const Decl **OriginalDecl) const {
404 if (!D) {
405 if (OriginalDecl)
      *OriginalDecl = nullptr;
407 return nullptr;
408 }
409
410 D = &adjustDeclToTemplate(D: *D);
411
412 // Any comment directly attached to D?
413 {
414 auto DeclComment = DeclRawComments.find(Val: D);
415 if (DeclComment != DeclRawComments.end()) {
416 if (OriginalDecl)
417 *OriginalDecl = D;
418 return DeclComment->second;
419 }
420 }
421
422 // Any comment attached to any redeclaration of D?
423 const Decl *CanonicalD = D->getCanonicalDecl();
424 if (!CanonicalD)
425 return nullptr;
426
427 {
428 auto RedeclComment = RedeclChainComments.find(Val: CanonicalD);
429 if (RedeclComment != RedeclChainComments.end()) {
430 if (OriginalDecl)
431 *OriginalDecl = RedeclComment->second;
432 auto CommentAtRedecl = DeclRawComments.find(Val: RedeclComment->second);
433 assert(CommentAtRedecl != DeclRawComments.end() &&
434 "This decl is supposed to have comment attached.");
435 return CommentAtRedecl->second;
436 }
437 }
438
439 // Any redeclarations of D that we haven't checked for comments yet?
  // We can't hold a DenseMap::iterator here, since it could be invalidated by
  // the cache updates performed in the loop below.
441 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
442 return CommentlessRedeclChains.lookup(Val: CanonicalD);
443 }();
444
445 for (const auto Redecl : D->redecls()) {
446 assert(Redecl);
447 // Skip all redeclarations that have been checked previously.
448 if (LastCheckedRedecl) {
449 if (LastCheckedRedecl == Redecl) {
450 LastCheckedRedecl = nullptr;
451 }
452 continue;
453 }
454 const RawComment *RedeclComment = getRawCommentForDeclNoCache(D: Redecl);
455 if (RedeclComment) {
456 cacheRawCommentForDecl(OriginalD: *Redecl, Comment: *RedeclComment);
457 if (OriginalDecl)
458 *OriginalDecl = Redecl;
459 return RedeclComment;
460 }
461 CommentlessRedeclChains[CanonicalD] = Redecl;
462 }
463
464 if (OriginalDecl)
465 *OriginalDecl = nullptr;
466 return nullptr;
467}
468
469void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
470 const RawComment &Comment) const {
471 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
472 DeclRawComments.try_emplace(Key: &OriginalD, Args: &Comment);
473 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
474 RedeclChainComments.try_emplace(Key: CanonicalDecl, Args: &OriginalD);
475 CommentlessRedeclChains.erase(Val: CanonicalDecl);
476}
477
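/// If \p ObjCMethod is declared inside an @implementation, append to
/// \p Redeclared any redeclarations of it found in the known class extensions
/// of the corresponding interface.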
478static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
479 SmallVectorImpl<const NamedDecl *> &Redeclared) {
480 const DeclContext *DC = ObjCMethod->getDeclContext();
481 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
482 const ObjCInterfaceDecl *ID = IMD->getClassInterface();
483 if (!ID)
484 return;
485 // Add redeclared method here.
486 for (const auto *Ext : ID->known_extensions()) {
487 if (ObjCMethodDecl *RedeclaredMethod =
488 Ext->getMethod(ObjCMethod->getSelector(),
489 ObjCMethod->isInstanceMethod()))
490 Redeclared.push_back(RedeclaredMethod);
491 }
492 }
493}
494
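/// Attach any not-yet-attached documentation comments in the file containing
/// \p Decls to the just-parsed declarations, parsing and caching each comment
/// that is matched to a declaration.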
495void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
496 const Preprocessor *PP) {
497 if (Comments.empty() || Decls.empty())
498 return;
499
500 FileID File;
501 for (const Decl *D : Decls) {
502 if (D->isInvalidDecl())
503 continue;
504
505 D = &adjustDeclToTemplate(D: *D);
506 SourceLocation Loc = D->getLocation();
507 if (Loc.isValid()) {
508 // See if there are any new comments that are not attached to a decl.
509 // The location doesn't have to be precise - we care only about the file.
510 File = SourceMgr.getDecomposedLoc(Loc).first;
511 break;
512 }
513 }
514
515 if (File.isInvalid())
516 return;
517
518 auto CommentsInThisFile = Comments.getCommentsInFile(File);
519 if (!CommentsInThisFile || CommentsInThisFile->empty() ||
520 CommentsInThisFile->rbegin()->second->isAttached())
521 return;
522
523 // There is at least one comment not attached to a decl.
524 // Maybe it should be attached to one of Decls?
525 //
526 // Note that this way we pick up not only comments that precede the
527 // declaration, but also comments that *follow* the declaration -- thanks to
528 // the lookahead in the lexer: we've consumed the semicolon and looked
529 // ahead through comments.
530 for (const Decl *D : Decls) {
531 assert(D);
532 if (D->isInvalidDecl())
533 continue;
534
535 D = &adjustDeclToTemplate(D: *D);
536
537 if (DeclRawComments.count(Val: D) > 0)
538 continue;
539
540 const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);
541
542 for (const auto DeclLoc : DeclLocs) {
543 if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
544 continue;
545
546 if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
547 D, RepresentativeLocForDecl: DeclLoc, CommentsInTheFile: *CommentsInThisFile)) {
548 cacheRawCommentForDecl(OriginalD: *D, Comment: *DocComment);
549 comments::FullComment *FC = DocComment->parse(Context: *this, PP, D);
550 ParsedComments[D->getCanonicalDecl()] = FC;
551 break;
552 }
553 }
554 }
555}
556
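/// Create a copy of \p FC that reuses its parsed comment blocks but whose
/// DeclInfo is filled in for \p D, so that a comment written on a related
/// declaration (a redeclaration, overridden method, or base class) can be
/// reused for \p D.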
557comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
558 const Decl *D) const {
559 auto *ThisDeclInfo = new (*this) comments::DeclInfo;
560 ThisDeclInfo->CommentDecl = D;
561 ThisDeclInfo->IsFilled = false;
562 ThisDeclInfo->fill();
563 ThisDeclInfo->CommentDecl = FC->getDecl();
564 if (!ThisDeclInfo->TemplateParameters)
565 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
566 comments::FullComment *CFC =
567 new (*this) comments::FullComment(FC->getBlocks(),
568 ThisDeclInfo);
569 return CFC;
570}
571
comments::FullComment *
ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
573 const RawComment *RC = getRawCommentForDeclNoCache(D);
574 return RC ? RC->parse(Context: *this, PP: nullptr, D) : nullptr;
575}
576
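/// Return the parsed documentation comment for \p D, caching the result. If
/// \p D itself has no comment, fall back (where applicable) to an overridden
/// method, an Objective-C property, superclass or category interface, a
/// typedef's underlying tag type, or a public base class, and clone that
/// comment for \p D.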
577comments::FullComment *ASTContext::getCommentForDecl(
578 const Decl *D,
579 const Preprocessor *PP) const {
580 if (!D || D->isInvalidDecl())
581 return nullptr;
582 D = &adjustDeclToTemplate(D: *D);
583
584 const Decl *Canonical = D->getCanonicalDecl();
585 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
586 ParsedComments.find(Val: Canonical);
587
588 if (Pos != ParsedComments.end()) {
589 if (Canonical != D) {
590 comments::FullComment *FC = Pos->second;
591 comments::FullComment *CFC = cloneFullComment(FC, D);
592 return CFC;
593 }
594 return Pos->second;
595 }
596
597 const Decl *OriginalDecl = nullptr;
598
599 const RawComment *RC = getRawCommentForAnyRedecl(D, OriginalDecl: &OriginalDecl);
600 if (!RC) {
601 if (isa<ObjCMethodDecl>(Val: D) || isa<FunctionDecl>(Val: D)) {
602 SmallVector<const NamedDecl*, 8> Overridden;
603 const auto *OMD = dyn_cast<ObjCMethodDecl>(Val: D);
604 if (OMD && OMD->isPropertyAccessor())
605 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
606 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
607 return cloneFullComment(FC, D);
608 if (OMD)
609 addRedeclaredMethods(ObjCMethod: OMD, Redeclared&: Overridden);
610 getOverriddenMethods(Method: dyn_cast<NamedDecl>(Val: D), Overridden);
611 for (unsigned i = 0, e = Overridden.size(); i < e; i++)
612 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
613 return cloneFullComment(FC, D);
614 }
615 else if (const auto *TD = dyn_cast<TypedefNameDecl>(Val: D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
618 QualType QT = TD->getUnderlyingType();
619 if (const auto *TT = QT->getAs<TagType>())
620 if (const Decl *TD = TT->getDecl())
621 if (comments::FullComment *FC = getCommentForDecl(D: TD, PP))
622 return cloneFullComment(FC, D);
623 }
624 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(Val: D)) {
625 while (IC->getSuperClass()) {
626 IC = IC->getSuperClass();
627 if (comments::FullComment *FC = getCommentForDecl(IC, PP))
628 return cloneFullComment(FC, D);
629 }
630 }
631 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(Val: D)) {
632 if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
633 if (comments::FullComment *FC = getCommentForDecl(IC, PP))
634 return cloneFullComment(FC, D);
635 }
636 else if (const auto *RD = dyn_cast<CXXRecordDecl>(Val: D)) {
637 if (!(RD = RD->getDefinition()))
638 return nullptr;
639 // Check non-virtual bases.
640 for (const auto &I : RD->bases()) {
641 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
642 continue;
643 QualType Ty = I.getType();
644 if (Ty.isNull())
645 continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC =
                  getCommentForDecl(NonVirtualBase, PP))
            return cloneFullComment(FC, D);
        }
653 }
654 // Check virtual bases.
655 for (const auto &I : RD->vbases()) {
656 if (I.getAccessSpecifier() != AS_public)
657 continue;
658 QualType Ty = I.getType();
659 if (Ty.isNull())
660 continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl(VirtualBase, PP))
            return cloneFullComment(FC, D);
        }
667 }
668 }
669 return nullptr;
670 }
671
  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl, since comments
  // can contain references to parameter names which may differ across
  // redeclarations.
676 if (D != OriginalDecl && OriginalDecl)
677 return getCommentForDecl(D: OriginalDecl, PP);
678
679 comments::FullComment *FC = RC->parse(Context: *this, PP, D);
680 ParsedComments[Canonical] = FC;
681 return FC;
682}
683
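/// Profile a template template parameter by its depth, position, pack-ness,
/// and the kinds and canonical types of its template parameters, so that
/// structurally equivalent parameters map to a single canonical declaration.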
684void
685ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
686 const ASTContext &C,
687 TemplateTemplateParmDecl *Parm) {
688 ID.AddInteger(Parm->getDepth());
689 ID.AddInteger(Parm->getPosition());
690 ID.AddBoolean(B: Parm->isParameterPack());
691
692 TemplateParameterList *Params = Parm->getTemplateParameters();
693 ID.AddInteger(I: Params->size());
694 for (TemplateParameterList::const_iterator P = Params->begin(),
695 PEnd = Params->end();
696 P != PEnd; ++P) {
697 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
698 ID.AddInteger(I: 0);
699 ID.AddBoolean(B: TTP->isParameterPack());
700 if (TTP->isExpandedParameterPack()) {
701 ID.AddBoolean(B: true);
702 ID.AddInteger(TTP->getNumExpansionParameters());
703 } else
704 ID.AddBoolean(B: false);
705 continue;
706 }
707
708 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
709 ID.AddInteger(I: 1);
710 ID.AddBoolean(B: NTTP->isParameterPack());
711 ID.AddPointer(Ptr: C.getUnconstrainedType(T: C.getCanonicalType(NTTP->getType()))
712 .getAsOpaquePtr());
713 if (NTTP->isExpandedParameterPack()) {
714 ID.AddBoolean(B: true);
715 ID.AddInteger(NTTP->getNumExpansionTypes());
716 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
717 QualType T = NTTP->getExpansionType(I);
718 ID.AddPointer(Ptr: T.getCanonicalType().getAsOpaquePtr());
719 }
720 } else
721 ID.AddBoolean(B: false);
722 continue;
723 }
724
725 auto *TTP = cast<TemplateTemplateParmDecl>(Val: *P);
726 ID.AddInteger(I: 2);
727 Profile(ID, C, TTP);
728 }
729}
730
731TemplateTemplateParmDecl *
732ASTContext::getCanonicalTemplateTemplateParmDecl(
733 TemplateTemplateParmDecl *TTP) const {
734 // Check if we already have a canonical template template parameter.
735 llvm::FoldingSetNodeID ID;
736 CanonicalTemplateTemplateParm::Profile(ID, C: *this, Parm: TTP);
737 void *InsertPos = nullptr;
738 CanonicalTemplateTemplateParm *Canonical
739 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
740 if (Canonical)
741 return Canonical->getParam();
742
743 // Build a canonical template parameter list.
744 TemplateParameterList *Params = TTP->getTemplateParameters();
745 SmallVector<NamedDecl *, 4> CanonParams;
746 CanonParams.reserve(N: Params->size());
747 for (TemplateParameterList::const_iterator P = Params->begin(),
748 PEnd = Params->end();
749 P != PEnd; ++P) {
750 // Note that, per C++20 [temp.over.link]/6, when determining whether
751 // template-parameters are equivalent, constraints are ignored.
752 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
753 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
754 C: *this, DC: getTranslationUnitDecl(), KeyLoc: SourceLocation(), NameLoc: SourceLocation(),
755 D: TTP->getDepth(), P: TTP->getIndex(), Id: nullptr, Typename: false,
756 ParameterPack: TTP->isParameterPack(), /*HasTypeConstraint=*/false,
757 NumExpanded: TTP->isExpandedParameterPack()
758 ? std::optional<unsigned>(TTP->getNumExpansionParameters())
759 : std::nullopt);
760 CanonParams.push_back(NewTTP);
761 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
762 QualType T = getUnconstrainedType(T: getCanonicalType(NTTP->getType()));
763 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
764 NonTypeTemplateParmDecl *Param;
765 if (NTTP->isExpandedParameterPack()) {
766 SmallVector<QualType, 2> ExpandedTypes;
767 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
768 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
769 ExpandedTypes.push_back(Elt: getCanonicalType(NTTP->getExpansionType(I)));
770 ExpandedTInfos.push_back(
771 Elt: getTrivialTypeSourceInfo(T: ExpandedTypes.back()));
772 }
773
774 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
775 SourceLocation(),
776 SourceLocation(),
777 NTTP->getDepth(),
778 NTTP->getPosition(), nullptr,
779 T,
780 TInfo,
781 ExpandedTypes,
782 ExpandedTInfos);
783 } else {
784 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
785 SourceLocation(),
786 SourceLocation(),
787 NTTP->getDepth(),
788 NTTP->getPosition(), nullptr,
789 T,
790 NTTP->isParameterPack(),
791 TInfo);
792 }
793 CanonParams.push_back(Param);
794 } else
795 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
796 TTP: cast<TemplateTemplateParmDecl>(Val: *P)));
797 }
798
799 TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
800 *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
801 TTP->getPosition(), TTP->isParameterPack(), nullptr,
802 TemplateParameterList::Create(C: *this, TemplateLoc: SourceLocation(), LAngleLoc: SourceLocation(),
803 Params: CanonParams, RAngleLoc: SourceLocation(),
804 /*RequiresClause=*/nullptr));
805
806 // Get the new insert position for the node we care about.
807 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
808 assert(!Canonical && "Shouldn't be in the map!");
809 (void)Canonical;
810
811 // Create the canonical template template parameter entry.
812 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
813 CanonTemplateTemplateParms.InsertNode(N: Canonical, InsertPos);
814 return CanonTTP;
815}
816
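/// Return the C++ ABI kind to use, preferring an ABI explicitly selected via
/// the language options over the target's default.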
817TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
818 auto Kind = getTargetInfo().getCXXABI().getKind();
819 return getLangOpts().CXXABI.value_or(u&: Kind);
820}
821
822CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
823 if (!LangOpts.CPlusPlus) return nullptr;
824
825 switch (getCXXABIKind()) {
826 case TargetCXXABI::AppleARM64:
827 case TargetCXXABI::Fuchsia:
828 case TargetCXXABI::GenericARM: // Same as Itanium at this level
829 case TargetCXXABI::iOS:
830 case TargetCXXABI::WatchOS:
831 case TargetCXXABI::GenericAArch64:
832 case TargetCXXABI::GenericMIPS:
833 case TargetCXXABI::GenericItanium:
834 case TargetCXXABI::WebAssembly:
835 case TargetCXXABI::XL:
836 return CreateItaniumCXXABI(Ctx&: *this);
837 case TargetCXXABI::Microsoft:
838 return CreateMicrosoftCXXABI(Ctx&: *this);
839 }
840 llvm_unreachable("Invalid CXXABI type!");
841}
842
843interp::Context &ASTContext::getInterpContext() {
844 if (!InterpContext) {
845 InterpContext.reset(p: new interp::Context(*this));
846 }
847 return *InterpContext.get();
848}
849
850ParentMapContext &ASTContext::getParentMapContext() {
851 if (!ParentMapCtx)
852 ParentMapCtx.reset(p: new ParentMapContext(*this));
853 return *ParentMapCtx.get();
854}
855
856static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
857 const LangOptions &LangOpts) {
858 switch (LangOpts.getAddressSpaceMapMangling()) {
859 case LangOptions::ASMM_Target:
860 return TI.useAddressSpaceMapMangling();
861 case LangOptions::ASMM_On:
862 return true;
863 case LangOptions::ASMM_Off:
864 return false;
865 }
866 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
867}
868
869ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
870 IdentifierTable &idents, SelectorTable &sels,
871 Builtin::Context &builtins, TranslationUnitKind TUKind)
872 : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
873 DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
874 DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
875 DependentSizedMatrixTypes(this_()),
876 FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
877 DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
878 TemplateSpecializationTypes(this_()),
879 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
880 DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
881 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
882 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
883 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
884 LangOpts.XRayNeverInstrumentFiles,
885 LangOpts.XRayAttrListFiles, SM)),
886 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
887 PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
888 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
889 Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
890 CompCategories(this_()), LastSDM(nullptr, 0) {
891 addTranslationUnitDecl();
892}
893
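/// Release side tables owned by the context that are not trivially freed by
/// the bump allocator: DeclContext lookup maps, registered deallocation
/// callbacks, record layouts, attribute vectors, and per-module initializer
/// lists.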
894void ASTContext::cleanup() {
895 // Release the DenseMaps associated with DeclContext objects.
896 // FIXME: Is this the ideal solution?
897 ReleaseDeclContextMaps();
898
899 // Call all of the deallocation functions on all of their targets.
900 for (auto &Pair : Deallocations)
901 (Pair.first)(Pair.second);
902 Deallocations.clear();
903
904 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
905 // because they can contain DenseMaps.
906 for (llvm::DenseMap<const ObjCContainerDecl*,
907 const ASTRecordLayout*>::iterator
908 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
909 // Increment in loop to prevent using deallocated memory.
910 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
911 R->Destroy(Ctx&: *this);
912 ObjCLayouts.clear();
913
914 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
915 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
916 // Increment in loop to prevent using deallocated memory.
917 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
918 R->Destroy(Ctx&: *this);
919 }
920 ASTRecordLayouts.clear();
921
922 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
923 AEnd = DeclAttrs.end();
924 A != AEnd; ++A)
925 A->second->~AttrVec();
926 DeclAttrs.clear();
927
928 for (const auto &Value : ModuleInitializers)
929 Value.second->~PerModuleInitializers();
930 ModuleInitializers.clear();
931}
932
933ASTContext::~ASTContext() { cleanup(); }
934
935void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
936 TraversalScope = TopLevelDecls;
937 getParentMapContext().clear();
938}
939
940void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
941 Deallocations.push_back(Elt: {Callback, Data});
942}
943
944void
945ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
946 ExternalSource = std::move(Source);
947}
948
949void ASTContext::PrintStats() const {
950 llvm::errs() << "\n*** AST Context Stats:\n";
951 llvm::errs() << " " << Types.size() << " types total.\n";
952
953 unsigned counts[] = {
954#define TYPE(Name, Parent) 0,
955#define ABSTRACT_TYPE(Name, Parent)
956#include "clang/AST/TypeNodes.inc"
957 0 // Extra
958 };
959
960 for (unsigned i = 0, e = Types.size(); i != e; ++i) {
961 Type *T = Types[i];
962 counts[(unsigned)T->getTypeClass()]++;
963 }
964
965 unsigned Idx = 0;
966 unsigned TotalBytes = 0;
967#define TYPE(Name, Parent) \
968 if (counts[Idx]) \
969 llvm::errs() << " " << counts[Idx] << " " << #Name \
970 << " types, " << sizeof(Name##Type) << " each " \
971 << "(" << counts[Idx] * sizeof(Name##Type) \
972 << " bytes)\n"; \
973 TotalBytes += counts[Idx] * sizeof(Name##Type); \
974 ++Idx;
975#define ABSTRACT_TYPE(Name, Parent)
976#include "clang/AST/TypeNodes.inc"
977
978 llvm::errs() << "Total bytes = " << TotalBytes << "\n";
979
980 // Implicit special member functions.
981 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
982 << NumImplicitDefaultConstructors
983 << " implicit default constructors created\n";
984 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
985 << NumImplicitCopyConstructors
986 << " implicit copy constructors created\n";
987 if (getLangOpts().CPlusPlus)
988 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
989 << NumImplicitMoveConstructors
990 << " implicit move constructors created\n";
991 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
992 << NumImplicitCopyAssignmentOperators
993 << " implicit copy assignment operators created\n";
994 if (getLangOpts().CPlusPlus)
995 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
996 << NumImplicitMoveAssignmentOperators
997 << " implicit move assignment operators created\n";
998 llvm::errs() << NumImplicitDestructorsDeclared << "/"
999 << NumImplicitDestructors
1000 << " implicit destructors created\n";
1001
1002 if (ExternalSource) {
1003 llvm::errs() << "\n";
1004 ExternalSource->PrintStats();
1005 }
1006
1007 BumpAlloc.PrintStats();
1008}
1009
1010void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
1011 bool NotifyListeners) {
1012 if (NotifyListeners)
1013 if (auto *Listener = getASTMutationListener())
1014 Listener->RedefinedHiddenDefinition(D: ND, M);
1015
1016 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
1017}
1018
1019void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
1020 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
1021 if (It == MergedDefModules.end())
1022 return;
1023
1024 auto &Merged = It->second;
1025 llvm::DenseSet<Module*> Found;
1026 for (Module *&M : Merged)
1027 if (!Found.insert(M).second)
1028 M = nullptr;
1029 llvm::erase(Merged, nullptr);
1030}
1031
1032ArrayRef<Module *>
1033ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
1034 auto MergedIt =
1035 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
1036 if (MergedIt == MergedDefModules.end())
1037 return std::nullopt;
1038 return MergedIt->second;
1039}
1040
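/// Replace the lazily-referenced initializer IDs with the actual
/// declarations, loading them from the external AST source.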
1041void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
1042 if (LazyInitializers.empty())
1043 return;
1044
1045 auto *Source = Ctx.getExternalSource();
1046 assert(Source && "lazy initializers but no external source");
1047
1048 auto LazyInits = std::move(LazyInitializers);
1049 LazyInitializers.clear();
1050
1051 for (auto ID : LazyInits)
1052 Initializers.push_back(Elt: Source->GetExternalDecl(ID));
1053
1054 assert(LazyInitializers.empty() &&
1055 "GetExternalDecl for lazy module initializer added more inits");
1056}
1057
1058void ASTContext::addModuleInitializer(Module *M, Decl *D) {
1059 // One special case: if we add a module initializer that imports another
1060 // module, and that module's only initializer is an ImportDecl, simplify.
1061 if (const auto *ID = dyn_cast<ImportDecl>(Val: D)) {
1062 auto It = ModuleInitializers.find(Val: ID->getImportedModule());
1063
1064 // Maybe the ImportDecl does nothing at all. (Common case.)
1065 if (It == ModuleInitializers.end())
1066 return;
1067
1068 // Maybe the ImportDecl only imports another ImportDecl.
1069 auto &Imported = *It->second;
1070 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
1071 Imported.resolve(Ctx&: *this);
1072 auto *OnlyDecl = Imported.Initializers.front();
1073 if (isa<ImportDecl>(Val: OnlyDecl))
1074 D = OnlyDecl;
1075 }
1076 }
1077
1078 auto *&Inits = ModuleInitializers[M];
1079 if (!Inits)
1080 Inits = new (*this) PerModuleInitializers;
1081 Inits->Initializers.push_back(Elt: D);
1082}
1083
1084void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
1085 auto *&Inits = ModuleInitializers[M];
1086 if (!Inits)
1087 Inits = new (*this) PerModuleInitializers;
1088 Inits->LazyInitializers.insert(I: Inits->LazyInitializers.end(),
1089 From: IDs.begin(), To: IDs.end());
1090}
1091
1092ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
1093 auto It = ModuleInitializers.find(Val: M);
1094 if (It == ModuleInitializers.end())
1095 return std::nullopt;
1096
1097 auto *Inits = It->second;
1098 Inits->resolve(Ctx&: *this);
1099 return Inits->Initializers;
1100}
1101
1102void ASTContext::setCurrentNamedModule(Module *M) {
1103 assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should only set the named module for ASTContext once");
1106 CurrentCXXNamedModule = M;
1107}
1108
1109ExternCContextDecl *ASTContext::getExternCContextDecl() const {
1110 if (!ExternCContext)
1111 ExternCContext = ExternCContextDecl::Create(C: *this, TU: getTranslationUnitDecl());
1112
1113 return ExternCContext;
1114}
1115
1116BuiltinTemplateDecl *
1117ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
1118 const IdentifierInfo *II) const {
1119 auto *BuiltinTemplate =
1120 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
1121 BuiltinTemplate->setImplicit();
1122 getTranslationUnitDecl()->addDecl(D: BuiltinTemplate);
1123
1124 return BuiltinTemplate;
1125}
1126
1127BuiltinTemplateDecl *
1128ASTContext::getMakeIntegerSeqDecl() const {
1129 if (!MakeIntegerSeqDecl)
1130 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK: BTK__make_integer_seq,
1131 II: getMakeIntegerSeqName());
1132 return MakeIntegerSeqDecl;
1133}
1134
1135BuiltinTemplateDecl *
1136ASTContext::getTypePackElementDecl() const {
1137 if (!TypePackElementDecl)
1138 TypePackElementDecl = buildBuiltinTemplateDecl(BTK: BTK__type_pack_element,
1139 II: getTypePackElementName());
1140 return TypePackElementDecl;
1141}
1142
1143RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
1144 RecordDecl::TagKind TK) const {
1145 SourceLocation Loc;
1146 RecordDecl *NewDecl;
1147 if (getLangOpts().CPlusPlus)
1148 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
1149 Loc, &Idents.get(Name));
1150 else
1151 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
1152 &Idents.get(Name));
1153 NewDecl->setImplicit();
1154 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
1155 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
1156 return NewDecl;
1157}
1158
1159TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
1160 StringRef Name) const {
1161 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
1162 TypedefDecl *NewDecl = TypedefDecl::Create(
1163 const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
1164 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
1165 NewDecl->setImplicit();
1166 return NewDecl;
1167}
1168
1169TypedefDecl *ASTContext::getInt128Decl() const {
1170 if (!Int128Decl)
1171 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
1172 return Int128Decl;
1173}
1174
1175TypedefDecl *ASTContext::getUInt128Decl() const {
1176 if (!UInt128Decl)
1177 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
1178 return UInt128Decl;
1179}
1180
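/// Allocate a BuiltinType of kind \p K, register it in the list of types, and
/// initialize \p R with its canonical QualType.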
1181void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
1182 auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
1183 R = CanQualType::CreateUnsafe(Other: QualType(Ty, 0));
1184 Types.push_back(Ty);
1185}
1186
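/// Create the predefined builtin types for \p Target (and, if present,
/// \p AuxTarget), honoring the active language options. Expected to run only
/// once, after the target has been set.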
1187void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
1188 const TargetInfo *AuxTarget) {
1189 assert((!this->Target || this->Target == &Target) &&
1190 "Incorrect target reinitialization");
1191 assert(VoidTy.isNull() && "Context reinitialized?");
1192
1193 this->Target = &Target;
1194 this->AuxTarget = AuxTarget;
1195
1196 ABI.reset(p: createCXXABI(T: Target));
1197 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(TI: Target, LangOpts);
1198
1199 // C99 6.2.5p19.
1200 InitBuiltinType(VoidTy, BuiltinType::Void);
1201
1202 // C99 6.2.5p2.
1203 InitBuiltinType(BoolTy, BuiltinType::Bool);
1204 // C99 6.2.5p3.
1205 if (LangOpts.CharIsSigned)
1206 InitBuiltinType(CharTy, BuiltinType::Char_S);
1207 else
1208 InitBuiltinType(CharTy, BuiltinType::Char_U);
1209 // C99 6.2.5p4.
1210 InitBuiltinType(SignedCharTy, BuiltinType::SChar);
1211 InitBuiltinType(ShortTy, BuiltinType::Short);
1212 InitBuiltinType(IntTy, BuiltinType::Int);
1213 InitBuiltinType(LongTy, BuiltinType::Long);
1214 InitBuiltinType(LongLongTy, BuiltinType::LongLong);
1215
1216 // C99 6.2.5p6.
1217 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
1218 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
1219 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
1220 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
1221 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
1222
1223 // C99 6.2.5p10.
1224 InitBuiltinType(FloatTy, BuiltinType::Float);
1225 InitBuiltinType(DoubleTy, BuiltinType::Double);
1226 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
1227
1228 // GNU extension, __float128 for IEEE quadruple precision
1229 InitBuiltinType(Float128Ty, BuiltinType::Float128);
1230
1231 // __ibm128 for IBM extended precision
1232 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);
1233
1234 // C11 extension ISO/IEC TS 18661-3
1235 InitBuiltinType(Float16Ty, BuiltinType::Float16);
1236
1237 // ISO/IEC JTC1 SC22 WG14 N1169 Extension
1238 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
1239 InitBuiltinType(AccumTy, BuiltinType::Accum);
1240 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
1241 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
1242 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
1243 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
1244 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
1245 InitBuiltinType(FractTy, BuiltinType::Fract);
1246 InitBuiltinType(LongFractTy, BuiltinType::LongFract);
1247 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
1248 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
1249 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
1250 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
1251 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
1252 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
1253 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
1254 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
1255 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
1256 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
1257 InitBuiltinType(SatFractTy, BuiltinType::SatFract);
1258 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
1259 InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
1260 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
1261 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);
1262
1263 // GNU extension, 128-bit integers.
1264 InitBuiltinType(Int128Ty, BuiltinType::Int128);
1265 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
1266
1267 // C++ 3.9.1p5
1268 if (TargetInfo::isTypeSigned(Target.getWCharType()))
1269 InitBuiltinType(WCharTy, BuiltinType::WChar_S);
1270 else // -fshort-wchar makes wchar_t be unsigned.
1271 InitBuiltinType(WCharTy, BuiltinType::WChar_U);
1272 if (LangOpts.CPlusPlus && LangOpts.WChar)
1273 WideCharTy = WCharTy;
1274 else {
1275 // C99 (or C++ using -fno-wchar).
1276 WideCharTy = getFromTargetType(Target.getWCharType());
1277 }
1278
1279 WIntTy = getFromTargetType(Target.getWIntType());
1280
  // C++20 char8_t.
1282 InitBuiltinType(Char8Ty, BuiltinType::Char8);
1283
1284 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1285 InitBuiltinType(Char16Ty, BuiltinType::Char16);
1286 else // C99
1287 Char16Ty = getFromTargetType(Target.getChar16Type());
1288
1289 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
1290 InitBuiltinType(Char32Ty, BuiltinType::Char32);
1291 else // C99
1292 Char32Ty = getFromTargetType(Target.getChar32Type());
1293
1294 // Placeholder type for type-dependent expressions whose type is
1295 // completely unknown. No code should ever check a type against
1296 // DependentTy and users should never see it; however, it is here to
1297 // help diagnose failures to properly check for type-dependent
1298 // expressions.
1299 InitBuiltinType(DependentTy, BuiltinType::Dependent);
1300
1301 // Placeholder type for functions.
1302 InitBuiltinType(OverloadTy, BuiltinType::Overload);
1303
1304 // Placeholder type for bound members.
1305 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
1306
1307 // Placeholder type for pseudo-objects.
1308 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
1309
1310 // "any" type; useful for debugger-like clients.
1311 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
1312
1313 // Placeholder type for unbridged ARC casts.
1314 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
1315
1316 // Placeholder type for builtin functions.
1317 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);
1318
1319 // Placeholder type for OMP array sections.
1320 if (LangOpts.OpenMP) {
1321 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1322 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
1323 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
1324 }
1325 // Placeholder type for OpenACC array sections.
1326 if (LangOpts.OpenACC) {
1327 // FIXME: Once we implement OpenACC array sections in Sema, this will either
1328 // be combined with the OpenMP type, or given its own type. In the meantime,
1329 // just use the OpenMP type so that parsing can work.
1330 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
1331 }
1332 if (LangOpts.MatrixTypes)
1333 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);
1334
1335 // Builtin types for 'id', 'Class', and 'SEL'.
1336 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
1337 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
1338 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
1339
1340 if (LangOpts.OpenCL) {
1341#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
1342 InitBuiltinType(SingletonId, BuiltinType::Id);
1343#include "clang/Basic/OpenCLImageTypes.def"
1344
1345 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
1346 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
1347 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
1348 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
1349 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
1350
1351#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
1352 InitBuiltinType(Id##Ty, BuiltinType::Id);
1353#include "clang/Basic/OpenCLExtensionTypes.def"
1354 }
1355
1356 if (Target.hasAArch64SVETypes()) {
1357#define SVE_TYPE(Name, Id, SingletonId) \
1358 InitBuiltinType(SingletonId, BuiltinType::Id);
1359#include "clang/Basic/AArch64SVEACLETypes.def"
1360 }
1361
1362 if (Target.getTriple().isPPC64()) {
1363#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
1364 InitBuiltinType(Id##Ty, BuiltinType::Id);
1365#include "clang/Basic/PPCTypes.def"
1366#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
1367 InitBuiltinType(Id##Ty, BuiltinType::Id);
1368#include "clang/Basic/PPCTypes.def"
1369 }
1370
1371 if (Target.hasRISCVVTypes()) {
1372#define RVV_TYPE(Name, Id, SingletonId) \
1373 InitBuiltinType(SingletonId, BuiltinType::Id);
1374#include "clang/Basic/RISCVVTypes.def"
1375 }
1376
1377 if (Target.getTriple().isWasm() && Target.hasFeature(Feature: "reference-types")) {
1378#define WASM_TYPE(Name, Id, SingletonId) \
1379 InitBuiltinType(SingletonId, BuiltinType::Id);
1380#include "clang/Basic/WebAssemblyReferenceTypes.def"
1381 }
1382
1383 // Builtin type for __objc_yes and __objc_no
1384 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
1385 SignedCharTy : BoolTy);
1386
1387 ObjCConstantStringType = QualType();
1388
1389 ObjCSuperType = QualType();
1390
1391 // void * type
1392 if (LangOpts.OpenCLGenericAddressSpace) {
1393 auto Q = VoidTy.getQualifiers();
1394 Q.setAddressSpace(LangAS::opencl_generic);
1395 VoidPtrTy = getPointerType(getCanonicalType(
1396 getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
1397 } else {
1398 VoidPtrTy = getPointerType(VoidTy);
1399 }
1400
1401 // nullptr type (C++0x 2.14.7)
1402 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
1403
1404 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
1405 InitBuiltinType(HalfTy, BuiltinType::Half);
1406
1407 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);
1408
1409 // Builtin type used to help define __builtin_va_list.
1410 VaListTagDecl = nullptr;
1411
1412 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
1413 if (LangOpts.MicrosoftExt || LangOpts.Borland) {
1414 MSGuidTagDecl = buildImplicitRecord(Name: "_GUID");
1415 getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
1416 }
1417}
1418
1419DiagnosticsEngine &ASTContext::getDiagnostics() const {
1420 return SourceMgr.getDiagnostics();
1421}
1422
1423AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
1424 AttrVec *&Result = DeclAttrs[D];
1425 if (!Result) {
1426 void *Mem = Allocate(Size: sizeof(AttrVec));
1427 Result = new (Mem) AttrVec;
1428 }
1429
1430 return *Result;
1431}
1432
1433/// Erase the attributes corresponding to the given declaration.
1434void ASTContext::eraseDeclAttrs(const Decl *D) {
1435 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(Val: D);
1436 if (Pos != DeclAttrs.end()) {
1437 Pos->second->~AttrVec();
1438 DeclAttrs.erase(I: Pos);
1439 }
1440}
1441
1442// FIXME: Remove ?
1443MemberSpecializationInfo *
1444ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
1445 assert(Var->isStaticDataMember() && "Not a static data member");
1446 return getTemplateOrSpecializationInfo(Var)
1447 .dyn_cast<MemberSpecializationInfo *>();
1448}
1449
1450ASTContext::TemplateOrSpecializationInfo
1451ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
1452 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
1453 TemplateOrInstantiation.find(Val: Var);
1454 if (Pos == TemplateOrInstantiation.end())
1455 return {};
1456
1457 return Pos->second;
1458}
1459
1460void
1461ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
1462 TemplateSpecializationKind TSK,
1463 SourceLocation PointOfInstantiation) {
1464 assert(Inst->isStaticDataMember() && "Not a static data member");
1465 assert(Tmpl->isStaticDataMember() && "Not a static data member");
1466 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
1467 Tmpl, TSK, PointOfInstantiation));
1468}
1469
1470void
1471ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
1472 TemplateOrSpecializationInfo TSI) {
1473 assert(!TemplateOrInstantiation[Inst] &&
1474 "Already noted what the variable was instantiated from");
1475 TemplateOrInstantiation[Inst] = TSI;
1476}
1477
1478NamedDecl *
1479ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
1480 return InstantiatedFromUsingDecl.lookup(Val: UUD);
1481}
1482
1483void
1484ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
1485 assert((isa<UsingDecl>(Pattern) ||
1486 isa<UnresolvedUsingValueDecl>(Pattern) ||
1487 isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
1488 "pattern decl is not a using decl");
1489 assert((isa<UsingDecl>(Inst) ||
1490 isa<UnresolvedUsingValueDecl>(Inst) ||
1491 isa<UnresolvedUsingTypenameDecl>(Inst)) &&
1492 "instantiation did not produce a using decl");
1493 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
1494 InstantiatedFromUsingDecl[Inst] = Pattern;
1495}
1496
1497UsingEnumDecl *
1498ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
1499 return InstantiatedFromUsingEnumDecl.lookup(UUD);
1500}
1501
1502void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
1503 UsingEnumDecl *Pattern) {
1504 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
1505 InstantiatedFromUsingEnumDecl[Inst] = Pattern;
1506}
1507
1508UsingShadowDecl *
1509ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
1510 return InstantiatedFromUsingShadowDecl.lookup(Val: Inst);
1511}
1512
1513void
1514ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
1515 UsingShadowDecl *Pattern) {
1516 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
1517 InstantiatedFromUsingShadowDecl[Inst] = Pattern;
1518}
1519
1520FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
1521 return InstantiatedFromUnnamedFieldDecl.lookup(Val: Field);
1522}
1523
1524void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
1525 FieldDecl *Tmpl) {
1526 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
1527 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
1528 assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
1529 "Already noted what unnamed field was instantiated from");
1530
1531 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
1532}
1533
1534ASTContext::overridden_cxx_method_iterator
1535ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
1536 return overridden_methods(Method).begin();
1537}
1538
1539ASTContext::overridden_cxx_method_iterator
1540ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
1541 return overridden_methods(Method).end();
1542}
1543
1544unsigned
1545ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
1546 auto Range = overridden_methods(Method);
1547 return Range.end() - Range.begin();
1548}
1549
ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}
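// Illustrative use (not part of this file's logic): the returned range can be
// iterated directly, which is how clients typically consume it, e.g.
//
//   for (const CXXMethodDecl *Overridden : Ctx.overridden_methods(MD))
//     ...;
//
// where `Ctx` and `MD` stand for an ASTContext and a CXXMethodDecl available
// in the caller's scope.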
1558
1559void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
1560 const CXXMethodDecl *Overridden) {
1561 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
1562 OverriddenMethods[Method].push_back(NewVal: Overridden);
1563}
1564
1565void ASTContext::getOverriddenMethods(
1566 const NamedDecl *D,
1567 SmallVectorImpl<const NamedDecl *> &Overridden) const {
1568 assert(D);
1569
1570 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(Val: D)) {
1571 Overridden.append(in_start: overridden_methods_begin(Method: CXXMethod),
1572 in_end: overridden_methods_end(Method: CXXMethod));
1573 return;
1574 }
1575
1576 const auto *Method = dyn_cast<ObjCMethodDecl>(Val: D);
1577 if (!Method)
1578 return;
1579
1580 SmallVector<const ObjCMethodDecl *, 8> OverDecls;
1581 Method->getOverriddenMethods(Overridden&: OverDecls);
1582 Overridden.append(in_start: OverDecls.begin(), in_end: OverDecls.end());
1583}
1584
1585void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1586 assert(!Import->getNextLocalImport() &&
1587 "Import declaration already in the chain");
1588 assert(!Import->isFromASTFile() && "Non-local import declaration");
1589 if (!FirstLocalImport) {
1590 FirstLocalImport = Import;
1591 LastLocalImport = Import;
1592 return;
1593 }
1594
1595 LastLocalImport->setNextLocalImport(Import);
1596 LastLocalImport = Import;
1597}
1598
1599//===----------------------------------------------------------------------===//
1600// Type Sizing and Analysis
1601//===----------------------------------------------------------------------===//
1602
/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half is treated as
    // float.
    if (getLangOpts().HLSL)
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float: return Target->getFloatFormat();
  case BuiltinType::Double: return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}
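// A minimal, illustrative sketch (not part of this file): the semantics
// returned here are what callers hand to llvm::APFloat so that constant
// evaluation uses the target's floating-point representation, e.g.
//
//   const llvm::fltSemantics &Sem = Ctx.getFloatTypeSemantics(QT);
//   llvm::APFloat Zero = llvm::APFloat::getZero(Sem);
//
// where `Ctx` and `QT` stand for an ASTContext and a floating-point QualType
// available in the caller's scope.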
1637
CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getMinGlobalAlignOfVar(TypeSize, VD));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
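          //
          // Worked example (illustrative only): Offset & (~Offset + 1), i.e.
          // Offset & -Offset, isolates the lowest set bit. For Offset == 24
          // (0b11000) that is 8, and gcd(24, any power of two >= 8) == 8, so
          // a field at bit offset 24 inside a 32-bit-aligned record ends up
          // with FieldAlign == 8.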
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have a hard limit on the maximum requestable alignment in
  // the aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}
1736
1737CharUnits ASTContext::getExnObjectAlignment() const {
1738 return toCharUnitsFromBits(BitSize: Target->getExnObjectAlignment());
1739}
1740
// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned. This is
// the size of the memcpy that's performed when assigning this type
// using a trivial copy/move assignment operator.
TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const {
  TypeInfoChars Info = getTypeInfoInChars(T);

  // In C++, objects can sometimes be allocated into the tail padding
  // of a base-class subobject. We decide whether that's possible
  // during class layout, so here we can just trust the layout results.
  if (getLangOpts().CPlusPlus) {
    if (const auto *RT = T->getAs<RecordType>();
        RT && !RT->getDecl()->isInvalidDecl()) {
      const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl());
      Info.Width = layout.getDataSize();
    }
  }

  return Info;
}

/// getConstantArrayInfoInChars - Performing the computation in CharUnits
/// instead of in bits prevents overflowing the uint64_t for some large arrays.
static TypeInfoChars
getConstantArrayInfoInChars(const ASTContext &Context,
                            const ConstantArrayType *CAT) {
  TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType());
  uint64_t Size = CAT->getSize().getZExtValue();
  assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <=
                           (uint64_t)(-1) / Size) &&
         "Overflow in array type char size evaluation");
  uint64_t Width = EltInfo.Width.getQuantity() * Size;
  unsigned Align = EltInfo.Align.getQuantity();
  if (!Context.getTargetInfo().getCXXABI().isMicrosoft() ||
      Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64)
    Width = llvm::alignTo(Width, Align);
  return TypeInfoChars(CharUnits::fromQuantity(Width),
                       CharUnits::fromQuantity(Align),
                       EltInfo.AlignRequirement);
}
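// Concrete illustration of the overflow concern above (numbers only, not
// code): a hypothetical array of 2^61 chars occupies 2^61 CharUnits, which
// still fits in a uint64_t, but its size expressed in bits would be 2^64 and
// would wrap around; doing the arithmetic in CharUnits avoids that.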

TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const {
  if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
    return getConstantArrayInfoInChars(*this, CAT);
  TypeInfo Info = getTypeInfo(T);
  return TypeInfoChars(toCharUnitsFromBits(Info.Width),
                       toCharUnitsFromBits(Info.Align), Info.AlignRequirement);
}

TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const {
  return getTypeInfoInChars(T.getTypePtr());
}
1793
1794bool ASTContext::isPromotableIntegerType(QualType T) const {
1795 // HLSL doesn't promote all small integer types to int, it
1796 // just uses the rank-based promotion rules for all types.
1797 if (getLangOpts().HLSL)
1798 return false;
1799
1800 if (const auto *BT = T->getAs<BuiltinType>())
1801 switch (BT->getKind()) {
1802 case BuiltinType::Bool:
1803 case BuiltinType::Char_S:
1804 case BuiltinType::Char_U:
1805 case BuiltinType::SChar:
1806 case BuiltinType::UChar:
1807 case BuiltinType::Short:
1808 case BuiltinType::UShort:
1809 case BuiltinType::WChar_S:
1810 case BuiltinType::WChar_U:
1811 case BuiltinType::Char8:
1812 case BuiltinType::Char16:
1813 case BuiltinType::Char32:
1814 return true;
1815 default:
1816 return false;
1817 }
1818
1819 // Enumerated types are promotable to their compatible integer types
1820 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
1821 if (const auto *ET = T->getAs<EnumType>()) {
1822 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() ||
1823 ET->getDecl()->isScoped())
1824 return false;
1825
1826 return true;
1827 }
1828
1829 return false;
1830}
1831
1832bool ASTContext::isAlignmentRequired(const Type *T) const {
1833 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None;
1834}
1835
1836bool ASTContext::isAlignmentRequired(QualType T) const {
1837 return isAlignmentRequired(T: T.getTypePtr());
1838}
1839
1840unsigned ASTContext::getTypeAlignIfKnown(QualType T,
1841 bool NeedsPreferredAlignment) const {
1842 // An alignment on a typedef overrides anything else.
1843 if (const auto *TT = T->getAs<TypedefType>())
1844 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1845 return Align;
1846
1847 // If we have an (array of) complete type, we're done.
1848 T = getBaseElementType(QT: T);
1849 if (!T->isIncompleteType())
1850 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T);
1851
1852 // If we had an array type, its element type might be a typedef
1853 // type with an alignment attribute.
1854 if (const auto *TT = T->getAs<TypedefType>())
1855 if (unsigned Align = TT->getDecl()->getMaxAlignment())
1856 return Align;
1857
1858 // Otherwise, see if the declaration of the type had an attribute.
1859 if (const auto *TT = T->getAs<TagType>())
1860 return TT->getDecl()->getMaxAlignment();
1861
1862 return 0;
1863}
1864
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
  TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
  if (I != MemoizedTypeInfo.end())
    return I->second;

  // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup.
  TypeInfo TI = getTypeInfoImpl(T);
  MemoizedTypeInfo[T] = TI;
  return TI;
}
1875
1876/// getTypeInfoImpl - Return the size of the specified type, in bits. This
1877/// method does not work on incomplete types.
1878///
1879/// FIXME: Pointers into different addr spaces could have different sizes and
1880/// alignment requirements: getPointerInfo should take an AddrSpace, this
1881/// should take a QualType, &c.
1882TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
1883 uint64_t Width = 0;
1884 unsigned Align = 8;
1885 AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
1886 LangAS AS = LangAS::Default;
1887 switch (T->getTypeClass()) {
1888#define TYPE(Class, Base)
1889#define ABSTRACT_TYPE(Class, Base)
1890#define NON_CANONICAL_TYPE(Class, Base)
1891#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1892#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \
1893 case Type::Class: \
1894 assert(!T->isDependentType() && "should not see dependent types here"); \
1895 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr());
1896#include "clang/AST/TypeNodes.inc"
1897 llvm_unreachable("Should not see dependent types");
1898
1899 case Type::FunctionNoProto:
1900 case Type::FunctionProto:
1901 // GCC extension: alignof(function) = 32 bits
1902 Width = 0;
1903 Align = 32;
1904 break;
1905
1906 case Type::IncompleteArray:
1907 case Type::VariableArray:
1908 case Type::ConstantArray: {
1909 // Model non-constant sized arrays as size zero, but track the alignment.
1910 uint64_t Size = 0;
1911 if (const auto *CAT = dyn_cast<ConstantArrayType>(T))
1912 Size = CAT->getSize().getZExtValue();
1913
1914 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType());
1915 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) &&
1916 "Overflow in array type bit size evaluation");
1917 Width = EltInfo.Width * Size;
1918 Align = EltInfo.Align;
1919 AlignRequirement = EltInfo.AlignRequirement;
1920 if (!getTargetInfo().getCXXABI().isMicrosoft() ||
1921 getTargetInfo().getPointerWidth(AddrSpace: LangAS::Default) == 64)
1922 Width = llvm::alignTo(Value: Width, Align);
1923 break;
1924 }
1925
1926 case Type::ExtVector:
1927 case Type::Vector: {
1928 const auto *VT = cast<VectorType>(T);
1929 TypeInfo EltInfo = getTypeInfo(VT->getElementType());
1930 Width = VT->isExtVectorBoolType() ? VT->getNumElements()
1931 : EltInfo.Width * VT->getNumElements();
1932 // Enforce at least byte size and alignment.
1933 Width = std::max<unsigned>(8, Width);
1934 Align = std::max<unsigned>(8, Width);
1935
1936 // If the alignment is not a power of 2, round up to the next power of 2.
1937 // This happens for non-power-of-2 length vectors.
1938 if (Align & (Align-1)) {
1939 Align = llvm::bit_ceil(Align);
1940 Width = llvm::alignTo(Value: Width, Align);
1941 }
1942 // Adjust the alignment based on the target max.
1943 uint64_t TargetVectorAlign = Target->getMaxVectorAlign();
1944 if (TargetVectorAlign && TargetVectorAlign < Align)
1945 Align = TargetVectorAlign;
1946 if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
1947 // Adjust the alignment for fixed-length SVE vectors. This is important
1948 // for non-power-of-2 vector lengths.
1949 Align = 128;
1950 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
1951 // Adjust the alignment for fixed-length SVE predicates.
1952 Align = 16;
1953 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
1954 VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
1955 // Adjust the alignment for fixed-length RVV vectors.
1956 Align = std::min<unsigned>(64, Width);
1957 break;
1958 }
1959
1960 case Type::ConstantMatrix: {
1961 const auto *MT = cast<ConstantMatrixType>(T);
1962 TypeInfo ElementInfo = getTypeInfo(MT->getElementType());
1963 // The internal layout of a matrix value is implementation defined.
1964 // Initially be ABI compatible with arrays with respect to alignment and
1965 // size.
1966 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns();
1967 Align = ElementInfo.Align;
1968 break;
1969 }
1970
1971 case Type::Builtin:
1972 switch (cast<BuiltinType>(T)->getKind()) {
1973 default: llvm_unreachable("Unknown builtin type!");
1974 case BuiltinType::Void:
1975 // GCC extension: alignof(void) = 8 bits.
1976 Width = 0;
1977 Align = 8;
1978 break;
1979 case BuiltinType::Bool:
1980 Width = Target->getBoolWidth();
1981 Align = Target->getBoolAlign();
1982 break;
1983 case BuiltinType::Char_S:
1984 case BuiltinType::Char_U:
1985 case BuiltinType::UChar:
1986 case BuiltinType::SChar:
1987 case BuiltinType::Char8:
1988 Width = Target->getCharWidth();
1989 Align = Target->getCharAlign();
1990 break;
1991 case BuiltinType::WChar_S:
1992 case BuiltinType::WChar_U:
1993 Width = Target->getWCharWidth();
1994 Align = Target->getWCharAlign();
1995 break;
1996 case BuiltinType::Char16:
1997 Width = Target->getChar16Width();
1998 Align = Target->getChar16Align();
1999 break;
2000 case BuiltinType::Char32:
2001 Width = Target->getChar32Width();
2002 Align = Target->getChar32Align();
2003 break;
2004 case BuiltinType::UShort:
2005 case BuiltinType::Short:
2006 Width = Target->getShortWidth();
2007 Align = Target->getShortAlign();
2008 break;
2009 case BuiltinType::UInt:
2010 case BuiltinType::Int:
2011 Width = Target->getIntWidth();
2012 Align = Target->getIntAlign();
2013 break;
2014 case BuiltinType::ULong:
2015 case BuiltinType::Long:
2016 Width = Target->getLongWidth();
2017 Align = Target->getLongAlign();
2018 break;
2019 case BuiltinType::ULongLong:
2020 case BuiltinType::LongLong:
2021 Width = Target->getLongLongWidth();
2022 Align = Target->getLongLongAlign();
2023 break;
2024 case BuiltinType::Int128:
2025 case BuiltinType::UInt128:
2026 Width = 128;
2027 Align = Target->getInt128Align();
2028 break;
2029 case BuiltinType::ShortAccum:
2030 case BuiltinType::UShortAccum:
2031 case BuiltinType::SatShortAccum:
2032 case BuiltinType::SatUShortAccum:
2033 Width = Target->getShortAccumWidth();
2034 Align = Target->getShortAccumAlign();
2035 break;
2036 case BuiltinType::Accum:
2037 case BuiltinType::UAccum:
2038 case BuiltinType::SatAccum:
2039 case BuiltinType::SatUAccum:
2040 Width = Target->getAccumWidth();
2041 Align = Target->getAccumAlign();
2042 break;
2043 case BuiltinType::LongAccum:
2044 case BuiltinType::ULongAccum:
2045 case BuiltinType::SatLongAccum:
2046 case BuiltinType::SatULongAccum:
2047 Width = Target->getLongAccumWidth();
2048 Align = Target->getLongAccumAlign();
2049 break;
2050 case BuiltinType::ShortFract:
2051 case BuiltinType::UShortFract:
2052 case BuiltinType::SatShortFract:
2053 case BuiltinType::SatUShortFract:
2054 Width = Target->getShortFractWidth();
2055 Align = Target->getShortFractAlign();
2056 break;
2057 case BuiltinType::Fract:
2058 case BuiltinType::UFract:
2059 case BuiltinType::SatFract:
2060 case BuiltinType::SatUFract:
2061 Width = Target->getFractWidth();
2062 Align = Target->getFractAlign();
2063 break;
2064 case BuiltinType::LongFract:
2065 case BuiltinType::ULongFract:
2066 case BuiltinType::SatLongFract:
2067 case BuiltinType::SatULongFract:
2068 Width = Target->getLongFractWidth();
2069 Align = Target->getLongFractAlign();
2070 break;
2071 case BuiltinType::BFloat16:
2072 if (Target->hasBFloat16Type()) {
2073 Width = Target->getBFloat16Width();
2074 Align = Target->getBFloat16Align();
2075 } else if ((getLangOpts().SYCLIsDevice ||
2076 (getLangOpts().OpenMP &&
2077 getLangOpts().OpenMPIsTargetDevice)) &&
2078 AuxTarget->hasBFloat16Type()) {
2079 Width = AuxTarget->getBFloat16Width();
2080 Align = AuxTarget->getBFloat16Align();
2081 }
2082 break;
2083 case BuiltinType::Float16:
2084 case BuiltinType::Half:
2085 if (Target->hasFloat16Type() || !getLangOpts().OpenMP ||
2086 !getLangOpts().OpenMPIsTargetDevice) {
2087 Width = Target->getHalfWidth();
2088 Align = Target->getHalfAlign();
2089 } else {
2090 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2091 "Expected OpenMP device compilation.");
2092 Width = AuxTarget->getHalfWidth();
2093 Align = AuxTarget->getHalfAlign();
2094 }
2095 break;
2096 case BuiltinType::Float:
2097 Width = Target->getFloatWidth();
2098 Align = Target->getFloatAlign();
2099 break;
2100 case BuiltinType::Double:
2101 Width = Target->getDoubleWidth();
2102 Align = Target->getDoubleAlign();
2103 break;
2104 case BuiltinType::Ibm128:
2105 Width = Target->getIbm128Width();
2106 Align = Target->getIbm128Align();
2107 break;
2108 case BuiltinType::LongDouble:
2109 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2110 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() ||
2111 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) {
2112 Width = AuxTarget->getLongDoubleWidth();
2113 Align = AuxTarget->getLongDoubleAlign();
2114 } else {
2115 Width = Target->getLongDoubleWidth();
2116 Align = Target->getLongDoubleAlign();
2117 }
2118 break;
2119 case BuiltinType::Float128:
2120 if (Target->hasFloat128Type() || !getLangOpts().OpenMP ||
2121 !getLangOpts().OpenMPIsTargetDevice) {
2122 Width = Target->getFloat128Width();
2123 Align = Target->getFloat128Align();
2124 } else {
2125 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice &&
2126 "Expected OpenMP device compilation.");
2127 Width = AuxTarget->getFloat128Width();
2128 Align = AuxTarget->getFloat128Align();
2129 }
2130 break;
2131 case BuiltinType::NullPtr:
2132 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*)
2133 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2134 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2135 break;
2136 case BuiltinType::ObjCId:
2137 case BuiltinType::ObjCClass:
2138 case BuiltinType::ObjCSel:
2139 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2140 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2141 break;
2142 case BuiltinType::OCLSampler:
2143 case BuiltinType::OCLEvent:
2144 case BuiltinType::OCLClkEvent:
2145 case BuiltinType::OCLQueue:
2146 case BuiltinType::OCLReserveID:
2147#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
2148 case BuiltinType::Id:
2149#include "clang/Basic/OpenCLImageTypes.def"
2150#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
2151 case BuiltinType::Id:
2152#include "clang/Basic/OpenCLExtensionTypes.def"
2153 AS = Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
2154 Width = Target->getPointerWidth(AddrSpace: AS);
2155 Align = Target->getPointerAlign(AddrSpace: AS);
2156 break;
2157 // The SVE types are effectively target-specific. The length of an
2158 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple
2159 // of 128 bits. There is one predicate bit for each vector byte, so the
2160 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits.
2161 //
2162 // Because the length is only known at runtime, we use a dummy value
2163 // of 0 for the static length. The alignment values are those defined
2164 // by the Procedure Call Standard for the Arm Architecture.
2165#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
2166 IsSigned, IsFP, IsBF) \
2167 case BuiltinType::Id: \
2168 Width = 0; \
2169 Align = 128; \
2170 break;
2171#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
2172 case BuiltinType::Id: \
2173 Width = 0; \
2174 Align = 16; \
2175 break;
2176#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \
2177 case BuiltinType::Id: \
2178 Width = 0; \
2179 Align = 16; \
2180 break;
2181#include "clang/Basic/AArch64SVEACLETypes.def"
2182#define PPC_VECTOR_TYPE(Name, Id, Size) \
2183 case BuiltinType::Id: \
2184 Width = Size; \
2185 Align = Size; \
2186 break;
2187#include "clang/Basic/PPCTypes.def"
2188#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
2189 IsFP, IsBF) \
2190 case BuiltinType::Id: \
2191 Width = 0; \
2192 Align = ElBits; \
2193 break;
2194#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
2195 case BuiltinType::Id: \
2196 Width = 0; \
2197 Align = 8; \
2198 break;
2199#include "clang/Basic/RISCVVTypes.def"
2200#define WASM_TYPE(Name, Id, SingletonId) \
2201 case BuiltinType::Id: \
2202 Width = 0; \
2203 Align = 8; \
2204 break;
2205#include "clang/Basic/WebAssemblyReferenceTypes.def"
2206 }
2207 break;
2208 case Type::ObjCObjectPointer:
2209 Width = Target->getPointerWidth(AddrSpace: LangAS::Default);
2210 Align = Target->getPointerAlign(AddrSpace: LangAS::Default);
2211 break;
2212 case Type::BlockPointer:
2213 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace();
2214 Width = Target->getPointerWidth(AddrSpace: AS);
2215 Align = Target->getPointerAlign(AddrSpace: AS);
2216 break;
2217 case Type::LValueReference:
2218 case Type::RValueReference:
2219 // alignof and sizeof should never enter this code path here, so we go
2220 // the pointer route.
2221 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace();
2222 Width = Target->getPointerWidth(AddrSpace: AS);
2223 Align = Target->getPointerAlign(AddrSpace: AS);
2224 break;
2225 case Type::Pointer:
2226 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace();
2227 Width = Target->getPointerWidth(AddrSpace: AS);
2228 Align = Target->getPointerAlign(AddrSpace: AS);
2229 break;
2230 case Type::MemberPointer: {
2231 const auto *MPT = cast<MemberPointerType>(T);
2232 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT: MPT);
2233 Width = MPI.Width;
2234 Align = MPI.Align;
2235 break;
2236 }
2237 case Type::Complex: {
2238 // Complex types have the same alignment as their elements, but twice the
2239 // size.
2240 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType());
2241 Width = EltInfo.Width * 2;
2242 Align = EltInfo.Align;
2243 break;
2244 }
2245 case Type::ObjCObject:
2246 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
2247 case Type::Adjusted:
2248 case Type::Decayed:
2249 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr());
2250 case Type::ObjCInterface: {
2251 const auto *ObjCI = cast<ObjCInterfaceType>(T);
2252 if (ObjCI->getDecl()->isInvalidDecl()) {
2253 Width = 8;
2254 Align = 8;
2255 break;
2256 }
2257 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2258 Width = toBits(CharSize: Layout.getSize());
2259 Align = toBits(CharSize: Layout.getAlignment());
2260 break;
2261 }
  case Type::BitInt: {
    const auto *EIT = cast<BitIntType>(T);
    Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()),
                                 getCharWidth(), Target->getLongLongAlign());
    Width = llvm::alignTo(EIT->getNumBits(), Align);
    break;
  }
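  // Worked example (illustrative only, assuming an 8-bit char and a 64-bit
  // long long): for _BitInt(18), PowerOf2Ceil(18) == 32, clamping to [8, 64]
  // leaves Align == 32, and Width = alignTo(18, 32) == 32 bits.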
2269 case Type::Record:
2270 case Type::Enum: {
2271 const auto *TT = cast<TagType>(T);
2272
2273 if (TT->getDecl()->isInvalidDecl()) {
2274 Width = 8;
2275 Align = 8;
2276 break;
2277 }
2278
2279 if (const auto *ET = dyn_cast<EnumType>(TT)) {
2280 const EnumDecl *ED = ET->getDecl();
2281 TypeInfo Info =
2282 getTypeInfo(T: ED->getIntegerType()->getUnqualifiedDesugaredType());
2283 if (unsigned AttrAlign = ED->getMaxAlignment()) {
2284 Info.Align = AttrAlign;
2285 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum;
2286 }
2287 return Info;
2288 }
2289
2290 const auto *RT = cast<RecordType>(TT);
2291 const RecordDecl *RD = RT->getDecl();
2292 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2293 Width = toBits(CharSize: Layout.getSize());
2294 Align = toBits(CharSize: Layout.getAlignment());
2295 AlignRequirement = RD->hasAttr<AlignedAttr>()
2296 ? AlignRequirementKind::RequiredByRecord
2297 : AlignRequirementKind::None;
2298 break;
2299 }
2300
2301 case Type::SubstTemplateTypeParm:
2302 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
2303 getReplacementType().getTypePtr());
2304
2305 case Type::Auto:
2306 case Type::DeducedTemplateSpecialization: {
2307 const auto *A = cast<DeducedType>(T);
2308 assert(!A->getDeducedType().isNull() &&
2309 "cannot request the size of an undeduced or dependent auto type");
2310 return getTypeInfo(A->getDeducedType().getTypePtr());
2311 }
2312
2313 case Type::Paren:
2314 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
2315
2316 case Type::MacroQualified:
2317 return getTypeInfo(
2318 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr());
2319
2320 case Type::ObjCTypeParam:
2321 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
2322
2323 case Type::Using:
2324 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr());
2325
2326 case Type::Typedef: {
2327 const auto *TT = cast<TypedefType>(T);
2328 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr());
2329 // If the typedef has an aligned attribute on it, it overrides any computed
2330 // alignment we have. This violates the GCC documentation (which says that
2331 // attribute(aligned) can only round up) but matches its implementation.
2332 if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2333 Align = AttrAlign;
2334 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2335 } else {
2336 Align = Info.Align;
2337 AlignRequirement = Info.AlignRequirement;
2338 }
2339 Width = Info.Width;
2340 break;
2341 }
2342
2343 case Type::Elaborated:
2344 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2345
2346 case Type::Attributed:
2347 return getTypeInfo(
2348 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2349
2350 case Type::BTFTagAttributed:
2351 return getTypeInfo(
2352 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2353
2354 case Type::Atomic: {
2355 // Start with the base type information.
2356 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2357 Width = Info.Width;
2358 Align = Info.Align;
2359
2360 if (!Width) {
2361 // An otherwise zero-sized type should still generate an
2362 // atomic operation.
2363 Width = Target->getCharWidth();
2364 assert(Align);
2365 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2366 // If the size of the type doesn't exceed the platform's max
2367 // atomic promotion width, make the size and alignment more
2368 // favorable to atomic operations:
2369
2370 // Round the size up to a power of 2.
2371 Width = llvm::bit_ceil(Width);
2372
2373 // Set the alignment equal to the size.
2374 Align = static_cast<unsigned>(Width);
2375 }
2376 }
2377 break;
2378
2379 case Type::Pipe:
2380 Width = Target->getPointerWidth(AddrSpace: LangAS::opencl_global);
2381 Align = Target->getPointerAlign(AddrSpace: LangAS::opencl_global);
2382 break;
2383 }
2384
2385 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2386 return TypeInfo(Width, Align, AlignRequirement);
2387}
2388
2389unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2390 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(Val: T);
2391 if (I != MemoizedUnadjustedAlign.end())
2392 return I->second;
2393
2394 unsigned UnadjustedAlign;
2395 if (const auto *RT = T->getAs<RecordType>()) {
2396 const RecordDecl *RD = RT->getDecl();
2397 const ASTRecordLayout &Layout = getASTRecordLayout(D: RD);
2398 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2399 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2400 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(D: ObjCI->getDecl());
2401 UnadjustedAlign = toBits(CharSize: Layout.getUnadjustedAlignment());
2402 } else {
2403 UnadjustedAlign = getTypeAlign(T: T->getUnqualifiedDesugaredType());
2404 }
2405
2406 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2407 return UnadjustedAlign;
2408}
2409
2410unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2411 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2412 TargetTriple: getTargetInfo().getTriple(), Features: Target->getTargetOpts().FeatureMap);
2413 return SimdAlign;
2414}
2415
/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}
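// Quick sanity check of the two conversions above (illustrative only): on a
// target where getCharWidth() == 8, toCharUnitsFromBits(32) yields
// CharUnits::fromQuantity(4), and toBits(CharUnits::fromQuantity(4)) yields
// 32, so the round trip is lossless for byte-multiple sizes.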
2425
2426/// getTypeSizeInChars - Return the size of the specified type, in characters.
2427/// This method does not work on incomplete types.
2428CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
2429 return getTypeInfoInChars(T).Width;
2430}
2431CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
2432 return getTypeInfoInChars(T).Width;
2433}
2434
2435/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
2436/// characters. This method does not work on incomplete types.
2437CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
2438 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2439}
2440CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
2441 return toCharUnitsFromBits(BitSize: getTypeAlign(T));
2442}
2443
2444/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
2445/// type, in characters, before alignment adjustments. This method does
2446/// not work on incomplete types.
2447CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
2448 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2449}
2450CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
2451 return toCharUnitsFromBits(BitSize: getTypeUnadjustedAlign(T));
2452}
2453
/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different than the ABI
/// alignment in cases where it is beneficial for performance or backwards
/// compatibility preserving to overalign a data type. (Note: despite the name,
/// the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment, we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double)
  // and long long should be naturally aligned (despite requiring less
  // alignment) if possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on
    // a typedef declaration.
    if (!TI.isAlignRequired())
      return std::max(ABIAlign, (unsigned)getTypeSize(T));

  return ABIAlign;
}
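// Example of the distinction (illustrative, for a typical i386 System V
// target): 'double' has a 32-bit ABI alignment but a 64-bit preferred
// alignment, so standalone doubles get overaligned to 64 bits while struct
// layout still only guarantees 32-bit alignment for double members.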
2509
2510/// getTargetDefaultAlignForAttributeAligned - Return the default alignment
2511/// for __attribute__((aligned)) on this target, to be used if no alignment
2512/// value is specified.
2513unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const {
2514 return getTargetInfo().getDefaultAlignForAttributeAligned();
2515}
2516
2517/// getAlignOfGlobalVar - Return the alignment in bits that should be given
2518/// to a global variable of the specified type.
2519unsigned ASTContext::getAlignOfGlobalVar(QualType T, const VarDecl *VD) const {
2520 uint64_t TypeSize = getTypeSize(T: T.getTypePtr());
2521 return std::max(a: getPreferredTypeAlign(T),
2522 b: getMinGlobalAlignOfVar(Size: TypeSize, VD));
2523}
2524
2525/// getAlignOfGlobalVarInChars - Return the alignment in characters that
2526/// should be given to a global variable of the specified type.
2527CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T,
2528 const VarDecl *VD) const {
2529 return toCharUnitsFromBits(BitSize: getAlignOfGlobalVar(T, VD));
2530}
2531
2532unsigned ASTContext::getMinGlobalAlignOfVar(uint64_t Size,
2533 const VarDecl *VD) const {
2534 // Make the default handling as that of a non-weak definition in the
2535 // current translation unit.
2536 bool HasNonWeakDef = !VD || (VD->hasDefinition() && !VD->isWeak());
2537 return getTargetInfo().getMinGlobalAlign(Size, HasNonWeakDef);
2538}
2539
2540CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const {
2541 CharUnits Offset = CharUnits::Zero();
2542 const ASTRecordLayout *Layout = &getASTRecordLayout(RD);
2543 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) {
2544 Offset += Layout->getBaseClassOffset(Base);
2545 Layout = &getASTRecordLayout(Base);
2546 }
2547 return Offset;
2548}
2549
CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const {
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  CharUnits ThisAdjustment = CharUnits::Zero();
  ArrayRef<const CXXRecordDecl *> Path = MP.getMemberPointerPath();
  bool DerivedMember = MP.isMemberPointerToDerivedMember();
  const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
  for (unsigned I = 0, N = Path.size(); I != N; ++I) {
    const CXXRecordDecl *Base = RD;
    const CXXRecordDecl *Derived = Path[I];
    if (DerivedMember)
      std::swap(Base, Derived);
    ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base);
    RD = Path[I];
  }
  if (DerivedMember)
    ThisAdjustment = -ThisAdjustment;
  return ThisAdjustment;
}
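// Illustrative example: for
//
//   struct A { int x; };
//   struct B { int y; };
//   struct C : A, B {};
//
// converting &B::y to int C::* involves a single Base/Derived step whose
// contribution is the offset of the B subobject inside C (4 bytes on a
// typical target where A is a lone 4-byte int and no extra padding is
// inserted); DerivedMember only flips the sign of the accumulated total.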
2568
2569/// DeepCollectObjCIvars -
2570/// This routine first collects all declared, but not synthesized, ivars in
2571/// super class and then collects all ivars, including those synthesized for
2572/// current class. This routine is used for implementation of current class
2573/// when all ivars, declared and synthesized are known.
2574void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
2575 bool leafClass,
2576 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
2577 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
2578 DeepCollectObjCIvars(OI: SuperClass, leafClass: false, Ivars);
2579 if (!leafClass) {
2580 llvm::append_range(C&: Ivars, R: OI->ivars());
2581 } else {
2582 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
2583 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
2584 Iv= Iv->getNextIvar())
2585 Ivars.push_back(Elt: Iv);
2586 }
2587}
2588
2589/// CollectInheritedProtocols - Collect all protocols in current class and
2590/// those inherited by it.
2591void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
2592 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
2593 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(Val: CDecl)) {
2594 // We can use protocol_iterator here instead of
2595 // all_referenced_protocol_iterator since we are walking all categories.
2596 for (auto *Proto : OI->all_referenced_protocols()) {
2597 CollectInheritedProtocols(Proto, Protocols);
2598 }
2599
2600 // Categories of this Interface.
2601 for (const auto *Cat : OI->visible_categories())
2602 CollectInheritedProtocols(Cat, Protocols);
2603
2604 if (ObjCInterfaceDecl *SD = OI->getSuperClass())
2605 while (SD) {
2606 CollectInheritedProtocols(SD, Protocols);
2607 SD = SD->getSuperClass();
2608 }
2609 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(Val: CDecl)) {
2610 for (auto *Proto : OC->protocols()) {
2611 CollectInheritedProtocols(Proto, Protocols);
2612 }
2613 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(Val: CDecl)) {
2614 // Insert the protocol.
2615 if (!Protocols.insert(
2616 Ptr: const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second)
2617 return;
2618
2619 for (auto *Proto : OP->protocols())
2620 CollectInheritedProtocols(Proto, Protocols);
2621 }
2622}
2623
static bool unionHasUniqueObjectRepresentations(const ASTContext &Context,
                                                const RecordDecl *RD,
                                                bool CheckIfTriviallyCopyable) {
  assert(RD->isUnion() && "Must be union type");
  CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl());

  for (const auto *Field : RD->fields()) {
    if (!Context.hasUniqueObjectRepresentations(Field->getType(),
                                                CheckIfTriviallyCopyable))
      return false;
    CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType());
    if (FieldSize != UnionSize)
      return false;
  }
  return !RD->field_empty();
}
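// Example of the rule above (illustrative only): on a typical target where
// int and unsigned are both 4 bytes, 'union { int a; unsigned b; }' has
// unique object representations, while 'union { int a; char c; }' does not,
// because the char member does not cover the union's full size.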
2640
2641static int64_t getSubobjectOffset(const FieldDecl *Field,
2642 const ASTContext &Context,
2643 const clang::ASTRecordLayout & /*Layout*/) {
2644 return Context.getFieldOffset(Field);
2645}
2646
2647static int64_t getSubobjectOffset(const CXXRecordDecl *RD,
2648 const ASTContext &Context,
2649 const clang::ASTRecordLayout &Layout) {
2650 return Context.toBits(CharSize: Layout.getBaseClassOffset(Base: RD));
2651}
2652
2653static std::optional<int64_t>
2654structHasUniqueObjectRepresentations(const ASTContext &Context,
2655 const RecordDecl *RD,
2656 bool CheckIfTriviallyCopyable);
2657
2658static std::optional<int64_t>
2659getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context,
2660 bool CheckIfTriviallyCopyable) {
2661 if (Field->getType()->isRecordType()) {
2662 const RecordDecl *RD = Field->getType()->getAsRecordDecl();
2663 if (!RD->isUnion())
2664 return structHasUniqueObjectRepresentations(Context, RD,
2665 CheckIfTriviallyCopyable);
2666 }
2667
2668 // A _BitInt type may not be unique if it has padding bits
2669 // but if it is a bitfield the padding bits are not used.
2670 bool IsBitIntType = Field->getType()->isBitIntType();
2671 if (!Field->getType()->isReferenceType() && !IsBitIntType &&
2672 !Context.hasUniqueObjectRepresentations(Ty: Field->getType(),
2673 CheckIfTriviallyCopyable))
2674 return std::nullopt;
2675
2676 int64_t FieldSizeInBits =
2677 Context.toBits(CharSize: Context.getTypeSizeInChars(Field->getType()));
2678 if (Field->isBitField()) {
2679 // If we have explicit padding bits, they don't contribute bits
2680 // to the actual object representation, so return 0.
2681 if (Field->isUnnamedBitfield())
2682 return 0;
2683
2684 int64_t BitfieldSize = Field->getBitWidthValue(Ctx: Context);
2685 if (IsBitIntType) {
2686 if ((unsigned)BitfieldSize >
2687 cast<BitIntType>(Field->getType())->getNumBits())
2688 return std::nullopt;
2689 } else if (BitfieldSize > FieldSizeInBits) {
2690 return std::nullopt;
2691 }
2692 FieldSizeInBits = BitfieldSize;
2693 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations(
2694 Ty: Field->getType(), CheckIfTriviallyCopyable)) {
2695 return std::nullopt;
2696 }
2697 return FieldSizeInBits;
2698}
2699
2700static std::optional<int64_t>
2701getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context,
2702 bool CheckIfTriviallyCopyable) {
2703 return structHasUniqueObjectRepresentations(Context, RD,
2704 CheckIfTriviallyCopyable);
2705}
2706
2707template <typename RangeT>
2708static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations(
2709 const RangeT &Subobjects, int64_t CurOffsetInBits,
2710 const ASTContext &Context, const clang::ASTRecordLayout &Layout,
2711 bool CheckIfTriviallyCopyable) {
2712 for (const auto *Subobject : Subobjects) {
2713 std::optional<int64_t> SizeInBits =
2714 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable);
2715 if (!SizeInBits)
2716 return std::nullopt;
2717 if (*SizeInBits != 0) {
2718 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout);
2719 if (Offset != CurOffsetInBits)
2720 return std::nullopt;
2721 CurOffsetInBits += *SizeInBits;
2722 }
2723 }
2724 return CurOffsetInBits;
2725}
2726
2727static std::optional<int64_t>
2728structHasUniqueObjectRepresentations(const ASTContext &Context,
2729 const RecordDecl *RD,
2730 bool CheckIfTriviallyCopyable) {
2731 assert(!RD->isUnion() && "Must be struct/class type");
2732 const auto &Layout = Context.getASTRecordLayout(D: RD);
2733
2734 int64_t CurOffsetInBits = 0;
2735 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(Val: RD)) {
2736 if (ClassDecl->isDynamicClass())
2737 return std::nullopt;
2738
2739 SmallVector<CXXRecordDecl *, 4> Bases;
2740 for (const auto &Base : ClassDecl->bases()) {
2741 // Empty types can be inherited from, and non-empty types can potentially
2742 // have tail padding, so just make sure there isn't an error.
2743 Bases.emplace_back(Args: Base.getType()->getAsCXXRecordDecl());
2744 }
2745
2746 llvm::sort(C&: Bases, Comp: [&](const CXXRecordDecl *L, const CXXRecordDecl *R) {
2747 return Layout.getBaseClassOffset(Base: L) < Layout.getBaseClassOffset(Base: R);
2748 });
2749
2750 std::optional<int64_t> OffsetAfterBases =
2751 structSubobjectsHaveUniqueObjectRepresentations(
2752 Subobjects: Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable);
2753 if (!OffsetAfterBases)
2754 return std::nullopt;
2755 CurOffsetInBits = *OffsetAfterBases;
2756 }
2757
2758 std::optional<int64_t> OffsetAfterFields =
2759 structSubobjectsHaveUniqueObjectRepresentations(
2760 Subobjects: RD->fields(), CurOffsetInBits, Context, Layout,
2761 CheckIfTriviallyCopyable);
2762 if (!OffsetAfterFields)
2763 return std::nullopt;
2764 CurOffsetInBits = *OffsetAfterFields;
2765
2766 return CurOffsetInBits;
2767}
2768
2769bool ASTContext::hasUniqueObjectRepresentations(
2770 QualType Ty, bool CheckIfTriviallyCopyable) const {
2771 // C++17 [meta.unary.prop]:
2772 // The predicate condition for a template specialization
2773 // has_unique_object_representations<T> shall be satisfied if and only if:
2774 // (9.1) - T is trivially copyable, and
2775 // (9.2) - any two objects of type T with the same value have the same
2776 // object representation, where:
2777 // - two objects of array or non-union class type are considered to have
2778 // the same value if their respective sequences of direct subobjects
2779 // have the same values, and
2780 // - two objects of union type are considered to have the same value if
2781 // they have the same active member and the corresponding members have
2782 // the same value.
2783 // The set of scalar types for which this condition holds is
2784 // implementation-defined. [ Note: If a type has padding bits, the condition
2785 // does not hold; otherwise, the condition holds true for unsigned integral
2786 // types. -- end note ]
2787 assert(!Ty.isNull() && "Null QualType sent to unique object rep check");
2788
2789 // Arrays are unique only if their element type is unique.
2790 if (Ty->isArrayType())
2791 return hasUniqueObjectRepresentations(Ty: getBaseElementType(QT: Ty),
2792 CheckIfTriviallyCopyable);
2793
2794 // (9.1) - T is trivially copyable...
2795 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(Context: *this))
2796 return false;
2797
2798 // All integrals and enums are unique.
2799 if (Ty->isIntegralOrEnumerationType()) {
2800 // Except _BitInt types that have padding bits.
2801 if (const auto *BIT = Ty->getAs<BitIntType>())
2802 return getTypeSize(BIT) == BIT->getNumBits();
2803
2804 return true;
2805 }
2806
2807 // All other pointers are unique.
2808 if (Ty->isPointerType())
2809 return true;
2810
2811 if (const auto *MPT = Ty->getAs<MemberPointerType>())
2812 return !ABI->getMemberPointerInfo(MPT).HasPadding;
2813
2814 if (Ty->isRecordType()) {
2815 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl();
2816
2817 if (Record->isInvalidDecl())
2818 return false;
2819
2820 if (Record->isUnion())
2821 return unionHasUniqueObjectRepresentations(Context: *this, RD: Record,
2822 CheckIfTriviallyCopyable);
2823
2824 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations(
2825 Context: *this, RD: Record, CheckIfTriviallyCopyable);
2826
2827 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(T: Ty));
2828 }
2829
2830 // FIXME: More cases to handle here (list by rsmith):
2831 // vectors (careful about, eg, vector of 3 foo)
2832 // _Complex int and friends
2833 // _Atomic T
2834 // Obj-C block pointers
2835 // Obj-C object pointers
2836 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t,
2837 // clk_event_t, queue_t, reserve_id_t)
2838 // There're also Obj-C class types and the Obj-C selector type, but I think it
2839 // makes sense for those to return false here.
2840
2841 return false;
2842}
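// Example of the record case (illustrative only): on a typical target where
// char is 1 byte and int is a padding-free 4 bytes, 'struct { int a; int b; }'
// has unique object representations, but 'struct { char c; int i; }' does not,
// because the three padding bytes after 'c' keep the field sizes from summing
// to the struct's size.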
2843
2844unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
2845 unsigned count = 0;
2846 // Count ivars declared in class extension.
2847 for (const auto *Ext : OI->known_extensions())
2848 count += Ext->ivar_size();
2849
2850 // Count ivar defined in this class's implementation. This
2851 // includes synthesized ivars.
2852 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
2853 count += ImplDecl->ivar_size();
2854
2855 return count;
2856}
2857
bool ASTContext::isSentinelNullExpr(const Expr *E) {
  if (!E)
    return false;

  // nullptr_t is always treated as null.
  if (E->getType()->isNullPtrType()) return true;

  if (E->getType()->isAnyPointerType() &&
      E->IgnoreParenCasts()->isNullPointerConstant(
          *this, Expr::NPC_ValueDependentIsNull))
    return true;

  // Unfortunately, __null has type 'int'.
  if (isa<GNUNullExpr>(E)) return true;

  return false;
}
2875
2876/// Get the implementation of ObjCInterfaceDecl, or nullptr if none
2877/// exists.
2878ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
2879 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2880 I = ObjCImpls.find(D);
2881 if (I != ObjCImpls.end())
2882 return cast<ObjCImplementationDecl>(Val: I->second);
2883 return nullptr;
2884}
2885
2886/// Get the implementation of ObjCCategoryDecl, or nullptr if none
2887/// exists.
2888ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
2889 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
2890 I = ObjCImpls.find(D);
2891 if (I != ObjCImpls.end())
2892 return cast<ObjCCategoryImplDecl>(Val: I->second);
2893 return nullptr;
2894}
2895
2896/// Set the implementation of ObjCInterfaceDecl.
2897void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
2898 ObjCImplementationDecl *ImplD) {
2899 assert(IFaceD && ImplD && "Passed null params");
2900 ObjCImpls[IFaceD] = ImplD;
2901}
2902
2903/// Set the implementation of ObjCCategoryDecl.
2904void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
2905 ObjCCategoryImplDecl *ImplD) {
2906 assert(CatD && ImplD && "Passed null params");
2907 ObjCImpls[CatD] = ImplD;
2908}
2909
2910const ObjCMethodDecl *
2911ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const {
2912 return ObjCMethodRedecls.lookup(Val: MD);
2913}
2914
2915void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
2916 const ObjCMethodDecl *Redecl) {
2917 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration");
2918 ObjCMethodRedecls[MD] = Redecl;
2919}
2920
2921const ObjCInterfaceDecl *ASTContext::getObjContainingInterface(
2922 const NamedDecl *ND) const {
2923 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
2924 return ID;
2925 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
2926 return CD->getClassInterface();
2927 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
2928 return IMD->getClassInterface();
2929
2930 return nullptr;
2931}
2932
2933/// Get the copy initialization expression of VarDecl, or nullptr if
2934/// none exists.
BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const {
  assert(VD && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "getBlockVarCopyInits - not __block var");
  auto I = BlockVarCopyInits.find(VD);
  if (I != BlockVarCopyInits.end())
    return I->second;
  return {nullptr, false};
}

/// Set the copy initialization expression of a block var decl.
void ASTContext::setBlockVarCopyInit(const VarDecl *VD, Expr *CopyExpr,
                                     bool CanThrow) {
  assert(VD && CopyExpr && "Passed null params");
  assert(VD->hasAttr<BlocksAttr>() &&
         "setBlockVarCopyInits - not __block var");
  BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow);
}

TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
                                                 unsigned DataSize) const {
  if (!DataSize)
    DataSize = TypeLoc::getFullDataSizeForType(T);
  else
    assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
           "incorrect data size provided to CreateTypeSourceInfo!");

  auto *TInfo =
      (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
  new (TInfo) TypeSourceInfo(T, DataSize);
  return TInfo;
}

TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
                                                     SourceLocation L) const {
  TypeSourceInfo *DI = CreateTypeSourceInfo(T);
  DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
  return DI;
}

const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
  return getObjCLayout(D, nullptr);
}

const ASTRecordLayout &
ASTContext::getASTObjCImplementationLayout(
    const ObjCImplementationDecl *D) const {
  return getObjCLayout(D->getClassInterface(), D);
}

static auto getCanonicalTemplateArguments(const ASTContext &C,
                                          ArrayRef<TemplateArgument> Args,
                                          bool &AnyNonCanonArgs) {
  SmallVector<TemplateArgument, 16> CanonArgs(Args);
  for (auto &Arg : CanonArgs) {
    TemplateArgument OrigArg = Arg;
    Arg = C.getCanonicalTemplateArgument(Arg);
    AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg);
  }
  return CanonArgs;
}

//===----------------------------------------------------------------------===//
//                   Type creation/memoization methods
//===----------------------------------------------------------------------===//

QualType
ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
  unsigned fastQuals = quals.getFastQualifiers();
  quals.removeFastQualifiers();

  // Check if we've already instantiated this type.
  llvm::FoldingSetNodeID ID;
  ExtQuals::Profile(ID, baseType, quals);
  void *insertPos = nullptr;
  if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
    assert(eq->getQualifiers() == quals);
    return QualType(eq, fastQuals);
  }

  // If the base type is not canonical, make the appropriate canonical type.
  QualType canon;
  if (!baseType->isCanonicalUnqualified()) {
    SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
    canonSplit.Quals.addConsistentQualifiers(quals);
    canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);

    // Re-find the insert position.
    (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
  }

  auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals);
  ExtQualNodes.InsertNode(eq, insertPos);
  return QualType(eq, fastQuals);
}

QualType ASTContext::getAddrSpaceQualType(QualType T,
                                          LangAS AddressSpace) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getAddressSpace() == AddressSpace)
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an address space specified, it cannot get
  // another one.
  assert(!Quals.hasAddressSpace() &&
         "Type cannot be in multiple addr spaces!");
  Quals.addAddressSpace(AddressSpace);

  return getExtQualType(TypeNode, Quals);
}

QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
  // If the type is not qualified with an address space, just return it
  // immediately.
  if (!T.hasAddressSpace())
    return T;

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode;

  while (T.hasAddressSpace()) {
    TypeNode = Quals.strip(T);

    // If the type no longer has an address space after stripping qualifiers,
    // jump out.
    if (!QualType(TypeNode, 0).hasAddressSpace())
      break;

    // There might be sugar in the way. Strip it and try again.
    T = T.getSingleStepDesugaredType(*this);
  }

  Quals.removeAddressSpace();

  // Removal of the address space can mean there are no longer any
  // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts)
  // or required.
  if (Quals.hasNonFastQualifiers())
    return getExtQualType(TypeNode, Quals);
  else
    return QualType(TypeNode, Quals.getFastQualifiers());
}

QualType ASTContext::getObjCGCQualType(QualType T,
                                       Qualifiers::GC GCAttr) const {
  QualType CanT = getCanonicalType(T);
  if (CanT.getObjCGCAttr() == GCAttr)
    return T;

  if (const auto *ptr = T->getAs<PointerType>()) {
    QualType Pointee = ptr->getPointeeType();
    if (Pointee->isAnyPointerType()) {
      QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
      return getPointerType(ResultType);
    }
  }

  // If we are composing extended qualifiers together, merge together
  // into one ExtQuals node.
  QualifierCollector Quals;
  const Type *TypeNode = Quals.strip(T);

  // If this type already has an ObjCGC specified, it cannot get
  // another one.
  assert(!Quals.hasObjCGCAttr() &&
         "Type cannot have multiple ObjCGCs!");
  Quals.addObjCGCAttr(GCAttr);

  return getExtQualType(TypeNode, Quals);
}

QualType ASTContext::removePtrSizeAddrSpace(QualType T) const {
  if (const PointerType *Ptr = T->getAs<PointerType>()) {
    QualType Pointee = Ptr->getPointeeType();
    if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) {
      return getPointerType(removeAddrSpaceQualType(Pointee));
    }
  }
  return T;
}

const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
                                                   FunctionType::ExtInfo Info) {
  if (T->getExtInfo() == Info)
    return T;

  QualType Result;
  if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
    Result = getFunctionNoProtoType(FNPT->getReturnType(), Info);
  } else {
    const auto *FPT = cast<FunctionProtoType>(T);
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    EPI.ExtInfo = Info;
    Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI);
  }

  return cast<FunctionType>(Result.getTypePtr());
}

void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD,
                                                 QualType ResultType) {
  FD = FD->getMostRecentDecl();
  while (true) {
    const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
    FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
    FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI));
    if (FunctionDecl *Next = FD->getPreviousDecl())
      FD = Next;
    else
      break;
  }
  if (ASTMutationListener *L = getASTMutationListener())
    L->DeducedReturnType(FD, ResultType);
}

/// Get a function type and produce the equivalent function type with the
/// specified exception specification. Type sugar that can be present on a
/// declaration of a function with an exception specification is permitted
/// and preserved. Other type sugar (for instance, typedefs) is not.
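/// For example (illustrative), rebuilding 'void () throw()' with an EST_None
/// specification yields 'void ()', while enclosing ParenType,
/// MacroQualifiedType or AttributedType sugar is rebuilt around the new
/// function type.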
QualType ASTContext::getFunctionTypeWithExceptionSpec(
    QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const {
  // Might have some parens.
  if (const auto *PT = dyn_cast<ParenType>(Orig))
    return getParenType(
        getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI));

  // Might be wrapped in a macro qualified type.
  if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig))
    return getMacroQualifiedType(
        getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI),
        MQT->getMacroIdentifier());

  // Might have a calling-convention attribute.
  if (const auto *AT = dyn_cast<AttributedType>(Orig))
    return getAttributedType(
        AT->getAttrKind(),
        getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI),
        getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI));

  // Anything else must be a function type. Rebuild it with the new exception
  // specification.
  const auto *Proto = Orig->castAs<FunctionProtoType>();
  return getFunctionType(
      Proto->getReturnType(), Proto->getParamTypes(),
      Proto->getExtProtoInfo().withExceptionSpec(ESI));
}

bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
                                                          QualType U) const {
  return hasSameType(T, U) ||
         (getLangOpts().CPlusPlus17 &&
          hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None),
                      getFunctionTypeWithExceptionSpec(U, EST_None)));
}

QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) {
  if (const auto *Proto = T->getAs<FunctionProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    SmallVector<QualType, 16> Args(Proto->param_types().size());
    for (unsigned i = 0, n = Args.size(); i != n; ++i)
      Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]);
    return getFunctionType(RetTy, Args, Proto->getExtProtoInfo());
  }

  if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) {
    QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType());
    return getFunctionNoProtoType(RetTy, Proto->getExtInfo());
  }

  return T;
}

bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) {
  return hasSameType(T, U) ||
         hasSameType(getFunctionTypeWithoutPtrSizes(T),
                     getFunctionTypeWithoutPtrSizes(U));
}

void ASTContext::adjustExceptionSpec(
    FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
    bool AsWritten) {
  // Update the type.
  QualType Updated =
      getFunctionTypeWithExceptionSpec(FD->getType(), ESI);
  FD->setType(Updated);

  if (!AsWritten)
    return;

  // Update the type in the type source information too.
  if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) {
    // If the type and the type-as-written differ, we may need to update
    // the type-as-written too.
    if (TSInfo->getType() != FD->getType())
      Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI);

    // FIXME: When we get proper type location information for exceptions,
    // we'll also have to rebuild the TypeSourceInfo. For now, we just patch
    // up the TypeSourceInfo.
    assert(TypeLoc::getFullDataSizeForType(Updated) ==
               TypeLoc::getFullDataSizeForType(TSInfo->getType()) &&
           "TypeLoc size mismatch from updating exception specification");
    TSInfo->overrideType(Updated);
  }
}

/// getComplexType - Return the uniqued reference to the type for a complex
/// number with the specified element type.
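/// E.g. getComplexType(FloatTy) is the uniqued node for the C99 type
/// '_Complex float'.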
QualType ASTContext::getComplexType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ComplexType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(CT, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getComplexType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical);
  Types.push_back(New);
  ComplexTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getPointerType - Return the uniqued reference to the type for a pointer to
/// the specified type.
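/// E.g. getPointerType(IntTy) yields the single 'int *' node for this
/// ASTContext; calling it again returns the same uniqued type.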
QualType ASTContext::getPointerType(QualType T) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  PointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical);
  Types.push_back(New);
  PointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, New);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(New);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(AdjustedType))
      AdjustedType(Type::Adjusted, Orig, New, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const {
  llvm::FoldingSetNodeID ID;
  AdjustedType::Profile(ID, Orig, Decayed);
  void *InsertPos = nullptr;
  AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  if (AT)
    return QualType(AT, 0);

  QualType Canonical = getCanonicalType(Decayed);

  // Get the new insert position for the node we care about.
  AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos);
  assert(!AT && "Shouldn't be in the map!");

  AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical);
  Types.push_back(AT);
  AdjustedTypes.InsertNode(AT, InsertPos);
  return QualType(AT, 0);
}

QualType ASTContext::getDecayedType(QualType T) const {
  assert((T->isArrayType() || T->isFunctionType()) && "T does not decay");

  QualType Decayed;

  // C99 6.7.5.3p7:
  //   A declaration of a parameter as "array of type" shall be
  //   adjusted to "qualified pointer to type", where the type
  //   qualifiers (if any) are those specified within the [ and ] of
  //   the array type derivation.
  if (T->isArrayType())
    Decayed = getArrayDecayedType(T);

  // C99 6.7.5.3p8:
  //   A declaration of a parameter as "function returning type"
  //   shall be adjusted to "pointer to function returning type", as
  //   in 6.3.2.1.
  if (T->isFunctionType())
    Decayed = getPointerType(T);

  return getDecayedType(T, Decayed);
}

/// getBlockPointerType - Return the uniqued reference to the type for
/// a pointer to the specified block.
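/// E.g. for T = 'void (int)', this returns the block pointer type written
/// 'void (^)(int)'.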
QualType ASTContext::getBlockPointerType(QualType T) const {
  assert(T->isFunctionType() && "block of function types only");
  // Unique pointers, to guarantee there is only one block of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  BlockPointerType::Profile(ID, T);

  void *InsertPos = nullptr;
  if (BlockPointerType *PT =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the block pointee type isn't canonical, this won't be a canonical
  // type either so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getBlockPointerType(getCanonicalType(T));

    // Get the new insert position for the node we care about.
    BlockPointerType *NewIP =
        BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New =
      new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical);
  Types.push_back(New);
  BlockPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getLValueReferenceType - Return the uniqued reference to the type for an
/// lvalue reference to the specified type.
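/// E.g. for T = 'int', this returns the uniqued 'int &' node; the
/// SpelledAsLValue flag tracks whether the reference was actually written
/// with '&' rather than arising from, say, reference collapsing.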
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, SpelledAsLValue);

  void *InsertPos = nullptr;
  if (LValueReferenceType *RT =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getLValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    LValueReferenceType *NewIP =
        LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(LValueReferenceType))
      LValueReferenceType(T, Canonical, SpelledAsLValue);
  Types.push_back(New);
  LValueReferenceTypes.InsertNode(New, InsertPos);

  return QualType(New, 0);
}

/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
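/// E.g. for T = 'int', this returns the uniqued 'int &&' node.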
QualType ASTContext::getRValueReferenceType(QualType T) const {
  assert((!T->isPlaceholderType() ||
          T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
         "Unresolved placeholder type");

  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  ReferenceType::Profile(ID, T, false);

  void *InsertPos = nullptr;
  if (RValueReferenceType *RT =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(RT, 0);

  const auto *InnerRef = T->getAs<ReferenceType>();

  // If the referencee type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (InnerRef || !T.isCanonical()) {
    QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
    Canonical = getRValueReferenceType(getCanonicalType(PointeeType));

    // Get the new insert position for the node we care about.
    RValueReferenceType *NewIP =
        RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, alignof(RValueReferenceType))
      RValueReferenceType(T, Canonical);
  Types.push_back(New);
  RValueReferenceTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getMemberPointerType - Return the uniqued reference to the type for a
/// member pointer to the specified type, in the specified class.
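/// E.g. for T = 'int' and Cls = the record type of 'struct S', this returns
/// the pointer-to-data-member type written 'int S::*'.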
QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
  // Unique pointers, to guarantee there is only one pointer of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  MemberPointerType::Profile(ID, T, Cls);

  void *InsertPos = nullptr;
  if (MemberPointerType *PT =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pointee or class type isn't canonical, this won't be a canonical
  // type either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
    Canonical =
        getMemberPointerType(getCanonicalType(T), getCanonicalType(Cls));

    // Get the new insert position for the node we care about.
    MemberPointerType *NewIP =
        MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, alignof(MemberPointerType))
      MemberPointerType(T, Cls, Canonical);
  Types.push_back(New);
  MemberPointerTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

/// getConstantArrayType - Return the unique reference to the type for an
/// array of the specified element type.
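/// E.g. an element type of 'int' with an APInt bound of 10 yields the type
/// 'int[10]'; the bound is widened or truncated to the target's maximum
/// pointer width below.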
3514QualType ASTContext::getConstantArrayType(QualType EltTy,
3515 const llvm::APInt &ArySizeIn,
3516 const Expr *SizeExpr,
3517 ArraySizeModifier ASM,
3518 unsigned IndexTypeQuals) const {
3519 assert((EltTy->isDependentType() ||
3520 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
3521 "Constant array of VLAs is illegal!");
3522
3523 // We only need the size as part of the type if it's instantiation-dependent.
3524 if (SizeExpr && !SizeExpr->isInstantiationDependent())
3525 SizeExpr = nullptr;
3526
3527 // Convert the array size into a canonical width matching the pointer size for
3528 // the target.
3529 llvm::APInt ArySize(ArySizeIn);
3530 ArySize = ArySize.zextOrTrunc(width: Target->getMaxPointerWidth());
3531
3532 llvm::FoldingSetNodeID ID;
3533 ConstantArrayType::Profile(ID, Ctx: *this, ET: EltTy, ArraySize: ArySize, SizeExpr, SizeMod: ASM,
3534 TypeQuals: IndexTypeQuals);
3535
3536 void *InsertPos = nullptr;
3537 if (ConstantArrayType *ATP =
3538 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
3539 return QualType(ATP, 0);
3540
3541 // If the element type isn't canonical or has qualifiers, or the array bound
3542 // is instantiation-dependent, this won't be a canonical type either, so fill
3543 // in the canonical type field.
3544 QualType Canon;
3545 // FIXME: Check below should look for qualifiers behind sugar.
3546 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) {
3547 SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
3548 Canon = getConstantArrayType(EltTy: QualType(canonSplit.Ty, 0), ArySizeIn: ArySize, SizeExpr: nullptr,
3549 ASM, IndexTypeQuals);
3550 Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
3551
3552 // Get the new insert position for the node we care about.
3553 ConstantArrayType *NewIP =
3554 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
3555 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
3556 }
3557
3558 void *Mem = Allocate(
3559 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0),
3560 alignof(ConstantArrayType));
3561 auto *New = new (Mem)
3562 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals);
3563 ConstantArrayTypes.InsertNode(N: New, InsertPos);
3564 Types.push_back(New);
3565 return QualType(New, 0);
3566}
3567
/// getVariableArrayDecayedType - Turns the given type, which may be
/// variably-modified, into the corresponding type with all the known
/// sizes replaced with [*].
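/// E.g. (roughly) 'int[n][10]' becomes 'int[*][10]': the variably-modified
/// outer bound is replaced with [*], while the constant inner bound is kept.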
3571QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
3572 // Vastly most common case.
3573 if (!type->isVariablyModifiedType()) return type;
3574
3575 QualType result;
3576
3577 SplitQualType split = type.getSplitDesugaredType();
3578 const Type *ty = split.Ty;
3579 switch (ty->getTypeClass()) {
3580#define TYPE(Class, Base)
3581#define ABSTRACT_TYPE(Class, Base)
3582#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
3583#include "clang/AST/TypeNodes.inc"
3584 llvm_unreachable("didn't desugar past all non-canonical types?");
3585
3586 // These types should never be variably-modified.
3587 case Type::Builtin:
3588 case Type::Complex:
3589 case Type::Vector:
3590 case Type::DependentVector:
3591 case Type::ExtVector:
3592 case Type::DependentSizedExtVector:
3593 case Type::ConstantMatrix:
3594 case Type::DependentSizedMatrix:
3595 case Type::DependentAddressSpace:
3596 case Type::ObjCObject:
3597 case Type::ObjCInterface:
3598 case Type::ObjCObjectPointer:
3599 case Type::Record:
3600 case Type::Enum:
3601 case Type::UnresolvedUsing:
3602 case Type::TypeOfExpr:
3603 case Type::TypeOf:
3604 case Type::Decltype:
3605 case Type::UnaryTransform:
3606 case Type::DependentName:
3607 case Type::InjectedClassName:
3608 case Type::TemplateSpecialization:
3609 case Type::DependentTemplateSpecialization:
3610 case Type::TemplateTypeParm:
3611 case Type::SubstTemplateTypeParmPack:
3612 case Type::Auto:
3613 case Type::DeducedTemplateSpecialization:
3614 case Type::PackExpansion:
3615 case Type::PackIndexing:
3616 case Type::BitInt:
3617 case Type::DependentBitInt:
3618 llvm_unreachable("type should never be variably-modified");
3619
3620 // These types can be variably-modified but should never need to
3621 // further decay.
3622 case Type::FunctionNoProto:
3623 case Type::FunctionProto:
3624 case Type::BlockPointer:
3625 case Type::MemberPointer:
3626 case Type::Pipe:
3627 return type;
3628
3629 // These types can be variably-modified. All these modifications
3630 // preserve structure except as noted by comments.
3631 // TODO: if we ever care about optimizing VLAs, there are no-op
3632 // optimizations available here.
3633 case Type::Pointer:
3634 result = getPointerType(getVariableArrayDecayedType(
3635 type: cast<PointerType>(ty)->getPointeeType()));
3636 break;
3637
3638 case Type::LValueReference: {
3639 const auto *lv = cast<LValueReferenceType>(ty);
3640 result = getLValueReferenceType(
3641 T: getVariableArrayDecayedType(type: lv->getPointeeType()),
3642 SpelledAsLValue: lv->isSpelledAsLValue());
3643 break;
3644 }
3645
3646 case Type::RValueReference: {
3647 const auto *lv = cast<RValueReferenceType>(ty);
3648 result = getRValueReferenceType(
3649 T: getVariableArrayDecayedType(type: lv->getPointeeType()));
3650 break;
3651 }
3652
3653 case Type::Atomic: {
3654 const auto *at = cast<AtomicType>(ty);
3655 result = getAtomicType(T: getVariableArrayDecayedType(type: at->getValueType()));
3656 break;
3657 }
3658
3659 case Type::ConstantArray: {
3660 const auto *cat = cast<ConstantArrayType>(ty);
3661 result = getConstantArrayType(
3662 EltTy: getVariableArrayDecayedType(type: cat->getElementType()),
3663 ArySizeIn: cat->getSize(),
3664 SizeExpr: cat->getSizeExpr(),
3665 ASM: cat->getSizeModifier(),
3666 IndexTypeQuals: cat->getIndexTypeCVRQualifiers());
3667 break;
3668 }
3669
3670 case Type::DependentSizedArray: {
3671 const auto *dat = cast<DependentSizedArrayType>(ty);
3672 result = getDependentSizedArrayType(
3673 EltTy: getVariableArrayDecayedType(type: dat->getElementType()),
3674 NumElts: dat->getSizeExpr(),
3675 ASM: dat->getSizeModifier(),
3676 IndexTypeQuals: dat->getIndexTypeCVRQualifiers(),
3677 Brackets: dat->getBracketsRange());
3678 break;
3679 }
3680
3681 // Turn incomplete types into [*] types.
3682 case Type::IncompleteArray: {
3683 const auto *iat = cast<IncompleteArrayType>(ty);
3684 result =
3685 getVariableArrayType(EltTy: getVariableArrayDecayedType(type: iat->getElementType()),
3686 /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Normal,
3687 IndexTypeQuals: iat->getIndexTypeCVRQualifiers(), Brackets: SourceRange());
3688 break;
3689 }
3690
3691 // Turn VLA types into [*] types.
3692 case Type::VariableArray: {
3693 const auto *vat = cast<VariableArrayType>(ty);
3694 result = getVariableArrayType(
3695 EltTy: getVariableArrayDecayedType(type: vat->getElementType()),
3696 /*size*/ NumElts: nullptr, ASM: ArraySizeModifier::Star,
3697 IndexTypeQuals: vat->getIndexTypeCVRQualifiers(), Brackets: vat->getBracketsRange());
3698 break;
3699 }
3700 }
3701
3702 // Apply the top-level qualifiers from the original.
3703 return getQualifiedType(T: result, Qs: split.Quals);
3704}
3705
3706/// getVariableArrayType - Returns a non-unique reference to the type for a
3707/// variable array of the specified element type.
3708QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts,
3709 ArraySizeModifier ASM,
3710 unsigned IndexTypeQuals,
3711 SourceRange Brackets) const {
3712 // Since we don't unique expressions, it isn't possible to unique VLA's
3713 // that have an expression provided for their size.
3714 QualType Canon;
3715
3716 // Be sure to pull qualifiers off the element type.
3717 // FIXME: Check below should look for qualifiers behind sugar.
3718 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
3719 SplitQualType canonSplit = getCanonicalType(T: EltTy).split();
3720 Canon = getVariableArrayType(EltTy: QualType(canonSplit.Ty, 0), NumElts, ASM,
3721 IndexTypeQuals, Brackets);
3722 Canon = getQualifiedType(T: Canon, Qs: canonSplit.Quals);
3723 }
3724
3725 auto *New = new (*this, alignof(VariableArrayType))
3726 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
3727
3728 VariableArrayTypes.push_back(x: New);
3729 Types.push_back(New);
3730 return QualType(New, 0);
3731}
3732
3733/// getDependentSizedArrayType - Returns a non-unique reference to
3734/// the type for a dependently-sized array of the specified element
3735/// type.
3736QualType ASTContext::getDependentSizedArrayType(QualType elementType,
3737 Expr *numElements,
3738 ArraySizeModifier ASM,
3739 unsigned elementTypeQuals,
3740 SourceRange brackets) const {
3741 assert((!numElements || numElements->isTypeDependent() ||
3742 numElements->isValueDependent()) &&
3743 "Size must be type- or value-dependent!");
3744
3745 // Dependently-sized array types that do not have a specified number
3746 // of elements will have their sizes deduced from a dependent
3747 // initializer. We do no canonicalization here at all, which is okay
3748 // because they can't be used in most locations.
3749 if (!numElements) {
3750 auto *newType = new (*this, alignof(DependentSizedArrayType))
3751 DependentSizedArrayType(elementType, QualType(), numElements, ASM,
3752 elementTypeQuals, brackets);
3753 Types.push_back(newType);
3754 return QualType(newType, 0);
3755 }
3756
3757 // Otherwise, we actually build a new type every time, but we
3758 // also build a canonical type.
3759
3760 SplitQualType canonElementType = getCanonicalType(T: elementType).split();
3761
3762 void *insertPos = nullptr;
3763 llvm::FoldingSetNodeID ID;
3764 DependentSizedArrayType::Profile(ID, Context: *this,
3765 ET: QualType(canonElementType.Ty, 0),
3766 SizeMod: ASM, TypeQuals: elementTypeQuals, E: numElements);
3767
3768 // Look for an existing type with these properties.
3769 DependentSizedArrayType *canonTy =
3770 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
3771
3772 // If we don't have one, build one.
3773 if (!canonTy) {
3774 canonTy = new (*this, alignof(DependentSizedArrayType))
3775 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
3776 numElements, ASM, elementTypeQuals, brackets);
3777 DependentSizedArrayTypes.InsertNode(N: canonTy, InsertPos: insertPos);
3778 Types.push_back(canonTy);
3779 }
3780
3781 // Apply qualifiers from the element type to the array.
3782 QualType canon = getQualifiedType(T: QualType(canonTy,0),
3783 Qs: canonElementType.Quals);
3784
3785 // If we didn't need extra canonicalization for the element type or the size
3786 // expression, then just use that as our result.
3787 if (QualType(canonElementType.Ty, 0) == elementType &&
3788 canonTy->getSizeExpr() == numElements)
3789 return canon;
3790
3791 // Otherwise, we need to build a type which follows the spelling
3792 // of the element type.
3793 auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
3794 DependentSizedArrayType(elementType, canon, numElements, ASM,
3795 elementTypeQuals, brackets);
3796 Types.push_back(Elt: sugaredType);
3797 return QualType(sugaredType, 0);
3798}
3799
3800QualType ASTContext::getIncompleteArrayType(QualType elementType,
3801 ArraySizeModifier ASM,
3802 unsigned elementTypeQuals) const {
3803 llvm::FoldingSetNodeID ID;
3804 IncompleteArrayType::Profile(ID, ET: elementType, SizeMod: ASM, TypeQuals: elementTypeQuals);
3805
3806 void *insertPos = nullptr;
3807 if (IncompleteArrayType *iat =
3808 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos))
3809 return QualType(iat, 0);
3810
3811 // If the element type isn't canonical, this won't be a canonical type
3812 // either, so fill in the canonical type field. We also have to pull
3813 // qualifiers off the element type.
3814 QualType canon;
3815
3816 // FIXME: Check below should look for qualifiers behind sugar.
3817 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
3818 SplitQualType canonSplit = getCanonicalType(T: elementType).split();
3819 canon = getIncompleteArrayType(elementType: QualType(canonSplit.Ty, 0),
3820 ASM, elementTypeQuals);
3821 canon = getQualifiedType(T: canon, Qs: canonSplit.Quals);
3822
3823 // Get the new insert position for the node we care about.
3824 IncompleteArrayType *existing =
3825 IncompleteArrayTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
3826 assert(!existing && "Shouldn't be in the map!"); (void) existing;
3827 }
3828
3829 auto *newType = new (*this, alignof(IncompleteArrayType))
3830 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
3831
3832 IncompleteArrayTypes.InsertNode(N: newType, InsertPos: insertPos);
3833 Types.push_back(newType);
3834 return QualType(newType, 0);
3835}
3836
3837ASTContext::BuiltinVectorTypeInfo
3838ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
3839#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \
3840 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
3841 NUMVECTORS};
3842
3843#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \
3844 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};
3845
3846 switch (Ty->getKind()) {
3847 default:
3848 llvm_unreachable("Unsupported builtin vector type");
3849 case BuiltinType::SveInt8:
3850 return SVE_INT_ELTTY(8, 16, true, 1);
3851 case BuiltinType::SveUint8:
3852 return SVE_INT_ELTTY(8, 16, false, 1);
3853 case BuiltinType::SveInt8x2:
3854 return SVE_INT_ELTTY(8, 16, true, 2);
3855 case BuiltinType::SveUint8x2:
3856 return SVE_INT_ELTTY(8, 16, false, 2);
3857 case BuiltinType::SveInt8x3:
3858 return SVE_INT_ELTTY(8, 16, true, 3);
3859 case BuiltinType::SveUint8x3:
3860 return SVE_INT_ELTTY(8, 16, false, 3);
3861 case BuiltinType::SveInt8x4:
3862 return SVE_INT_ELTTY(8, 16, true, 4);
3863 case BuiltinType::SveUint8x4:
3864 return SVE_INT_ELTTY(8, 16, false, 4);
3865 case BuiltinType::SveInt16:
3866 return SVE_INT_ELTTY(16, 8, true, 1);
3867 case BuiltinType::SveUint16:
3868 return SVE_INT_ELTTY(16, 8, false, 1);
3869 case BuiltinType::SveInt16x2:
3870 return SVE_INT_ELTTY(16, 8, true, 2);
3871 case BuiltinType::SveUint16x2:
3872 return SVE_INT_ELTTY(16, 8, false, 2);
3873 case BuiltinType::SveInt16x3:
3874 return SVE_INT_ELTTY(16, 8, true, 3);
3875 case BuiltinType::SveUint16x3:
3876 return SVE_INT_ELTTY(16, 8, false, 3);
3877 case BuiltinType::SveInt16x4:
3878 return SVE_INT_ELTTY(16, 8, true, 4);
3879 case BuiltinType::SveUint16x4:
3880 return SVE_INT_ELTTY(16, 8, false, 4);
3881 case BuiltinType::SveInt32:
3882 return SVE_INT_ELTTY(32, 4, true, 1);
3883 case BuiltinType::SveUint32:
3884 return SVE_INT_ELTTY(32, 4, false, 1);
3885 case BuiltinType::SveInt32x2:
3886 return SVE_INT_ELTTY(32, 4, true, 2);
3887 case BuiltinType::SveUint32x2:
3888 return SVE_INT_ELTTY(32, 4, false, 2);
3889 case BuiltinType::SveInt32x3:
3890 return SVE_INT_ELTTY(32, 4, true, 3);
3891 case BuiltinType::SveUint32x3:
3892 return SVE_INT_ELTTY(32, 4, false, 3);
3893 case BuiltinType::SveInt32x4:
3894 return SVE_INT_ELTTY(32, 4, true, 4);
3895 case BuiltinType::SveUint32x4:
3896 return SVE_INT_ELTTY(32, 4, false, 4);
3897 case BuiltinType::SveInt64:
3898 return SVE_INT_ELTTY(64, 2, true, 1);
3899 case BuiltinType::SveUint64:
3900 return SVE_INT_ELTTY(64, 2, false, 1);
3901 case BuiltinType::SveInt64x2:
3902 return SVE_INT_ELTTY(64, 2, true, 2);
3903 case BuiltinType::SveUint64x2:
3904 return SVE_INT_ELTTY(64, 2, false, 2);
3905 case BuiltinType::SveInt64x3:
3906 return SVE_INT_ELTTY(64, 2, true, 3);
3907 case BuiltinType::SveUint64x3:
3908 return SVE_INT_ELTTY(64, 2, false, 3);
3909 case BuiltinType::SveInt64x4:
3910 return SVE_INT_ELTTY(64, 2, true, 4);
3911 case BuiltinType::SveUint64x4:
3912 return SVE_INT_ELTTY(64, 2, false, 4);
3913 case BuiltinType::SveBool:
3914 return SVE_ELTTY(BoolTy, 16, 1);
3915 case BuiltinType::SveBoolx2:
3916 return SVE_ELTTY(BoolTy, 16, 2);
3917 case BuiltinType::SveBoolx4:
3918 return SVE_ELTTY(BoolTy, 16, 4);
3919 case BuiltinType::SveFloat16:
3920 return SVE_ELTTY(HalfTy, 8, 1);
3921 case BuiltinType::SveFloat16x2:
3922 return SVE_ELTTY(HalfTy, 8, 2);
3923 case BuiltinType::SveFloat16x3:
3924 return SVE_ELTTY(HalfTy, 8, 3);
3925 case BuiltinType::SveFloat16x4:
3926 return SVE_ELTTY(HalfTy, 8, 4);
3927 case BuiltinType::SveFloat32:
3928 return SVE_ELTTY(FloatTy, 4, 1);
3929 case BuiltinType::SveFloat32x2:
3930 return SVE_ELTTY(FloatTy, 4, 2);
3931 case BuiltinType::SveFloat32x3:
3932 return SVE_ELTTY(FloatTy, 4, 3);
3933 case BuiltinType::SveFloat32x4:
3934 return SVE_ELTTY(FloatTy, 4, 4);
3935 case BuiltinType::SveFloat64:
3936 return SVE_ELTTY(DoubleTy, 2, 1);
3937 case BuiltinType::SveFloat64x2:
3938 return SVE_ELTTY(DoubleTy, 2, 2);
3939 case BuiltinType::SveFloat64x3:
3940 return SVE_ELTTY(DoubleTy, 2, 3);
3941 case BuiltinType::SveFloat64x4:
3942 return SVE_ELTTY(DoubleTy, 2, 4);
3943 case BuiltinType::SveBFloat16:
3944 return SVE_ELTTY(BFloat16Ty, 8, 1);
3945 case BuiltinType::SveBFloat16x2:
3946 return SVE_ELTTY(BFloat16Ty, 8, 2);
3947 case BuiltinType::SveBFloat16x3:
3948 return SVE_ELTTY(BFloat16Ty, 8, 3);
3949 case BuiltinType::SveBFloat16x4:
3950 return SVE_ELTTY(BFloat16Ty, 8, 4);
3951#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
3952 IsSigned) \
3953 case BuiltinType::Id: \
3954 return {getIntTypeForBitwidth(ElBits, IsSigned), \
3955 llvm::ElementCount::getScalable(NumEls), NF};
3956#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
3957 case BuiltinType::Id: \
3958 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
3959 llvm::ElementCount::getScalable(NumEls), NF};
3960#define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
3961 case BuiltinType::Id: \
3962 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF};
3963#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
3964 case BuiltinType::Id: \
3965 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
3966#include "clang/Basic/RISCVVTypes.def"
3967 }
3968}
3969
3970/// getExternrefType - Return a WebAssembly externref type, which represents an
3971/// opaque reference to a host value.
3972QualType ASTContext::getWebAssemblyExternrefType() const {
3973 if (Target->getTriple().isWasm() && Target->hasFeature(Feature: "reference-types")) {
3974#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
3975 if (BuiltinType::Id == BuiltinType::WasmExternRef) \
3976 return SingletonId;
3977#include "clang/Basic/WebAssemblyReferenceTypes.def"
3978 }
3979 llvm_unreachable(
3980 "shouldn't try to generate type externref outside WebAssembly target");
3981}
3982
3983/// getScalableVectorType - Return the unique reference to a scalable vector
3984/// type of the specified element type and size. VectorType must be a built-in
3985/// type.
3986QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
3987 unsigned NumFields) const {
3988 if (Target->hasAArch64SVETypes()) {
3989 uint64_t EltTySize = getTypeSize(T: EltTy);
3990#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \
3991 IsSigned, IsFP, IsBF) \
3992 if (!EltTy->isBooleanType() && \
3993 ((EltTy->hasIntegerRepresentation() && \
3994 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
3995 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
3996 IsFP && !IsBF) || \
3997 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
3998 IsBF && !IsFP)) && \
3999 EltTySize == ElBits && NumElts == NumEls) { \
4000 return SingletonId; \
4001 }
4002#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \
4003 if (EltTy->isBooleanType() && NumElts == NumEls) \
4004 return SingletonId;
4005#define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId)
4006#include "clang/Basic/AArch64SVEACLETypes.def"
4007 } else if (Target->hasRISCVVTypes()) {
4008 uint64_t EltTySize = getTypeSize(T: EltTy);
4009#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
4010 IsFP, IsBF) \
4011 if (!EltTy->isBooleanType() && \
4012 ((EltTy->hasIntegerRepresentation() && \
4013 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
4014 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \
4015 IsFP && !IsBF) || \
4016 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \
4017 IsBF && !IsFP)) && \
4018 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \
4019 return SingletonId;
4020#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
4021 if (EltTy->isBooleanType() && NumElts == NumEls) \
4022 return SingletonId;
4023#include "clang/Basic/RISCVVTypes.def"
4024 }
4025 return QualType();
4026}
4027
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. The element type must be a built-in
/// type or a byte-sized, power-of-two _BitInt type.
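/// E.g. a GCC-style 'int __attribute__((vector_size(16)))' is represented as
/// a VectorType of 4 'int' elements with VectorKind::Generic.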
4030QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
4031 VectorKind VecKind) const {
4032 assert(vecType->isBuiltinType() ||
4033 (vecType->isBitIntType() &&
4034 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4035 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4036 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4037
4038 // Check if we've already instantiated a vector of this type.
4039 llvm::FoldingSetNodeID ID;
4040 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
4041
4042 void *InsertPos = nullptr;
4043 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4044 return QualType(VTP, 0);
4045
4046 // If the element type isn't canonical, this won't be a canonical type either,
4047 // so fill in the canonical type field.
4048 QualType Canonical;
4049 if (!vecType.isCanonical()) {
4050 Canonical = getVectorType(vecType: getCanonicalType(T: vecType), NumElts, VecKind);
4051
4052 // Get the new insert position for the node we care about.
4053 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4054 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4055 }
4056 auto *New = new (*this, alignof(VectorType))
4057 VectorType(vecType, NumElts, Canonical, VecKind);
4058 VectorTypes.InsertNode(N: New, InsertPos);
4059 Types.push_back(New);
4060 return QualType(New, 0);
4061}
4062
4063QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
4064 SourceLocation AttrLoc,
4065 VectorKind VecKind) const {
4066 llvm::FoldingSetNodeID ID;
4067 DependentVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: VecType), SizeExpr,
4068 VecKind);
4069 void *InsertPos = nullptr;
4070 DependentVectorType *Canon =
4071 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4072 DependentVectorType *New;
4073
4074 if (Canon) {
4075 New = new (*this, alignof(DependentVectorType)) DependentVectorType(
4076 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
4077 } else {
4078 QualType CanonVecTy = getCanonicalType(T: VecType);
4079 if (CanonVecTy == VecType) {
4080 New = new (*this, alignof(DependentVectorType))
4081 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind);
4082
4083 DependentVectorType *CanonCheck =
4084 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4085 assert(!CanonCheck &&
4086 "Dependent-sized vector_size canonical type broken");
4087 (void)CanonCheck;
4088 DependentVectorTypes.InsertNode(N: New, InsertPos);
4089 } else {
4090 QualType CanonTy = getDependentVectorType(VecType: CanonVecTy, SizeExpr,
4091 AttrLoc: SourceLocation(), VecKind);
4092 New = new (*this, alignof(DependentVectorType))
4093 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
4094 }
4095 }
4096
4097 Types.push_back(New);
4098 return QualType(New, 0);
4099}
4100
/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. The element type must be a
/// built-in type, a dependent type, or a byte-sized, power-of-two _BitInt
/// type.
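/// E.g. 'float __attribute__((ext_vector_type(4)))' (the Clang/OpenCL
/// 'float4' style type) is an ExtVectorType of 4 'float' elements.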
4103QualType ASTContext::getExtVectorType(QualType vecType,
4104 unsigned NumElts) const {
4105 assert(vecType->isBuiltinType() || vecType->isDependentType() ||
4106 (vecType->isBitIntType() &&
4107 // Only support _BitInt elements with byte-sized power of 2 NumBits.
4108 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) &&
4109 vecType->castAs<BitIntType>()->getNumBits() >= 8));
4110
4111 // Check if we've already instantiated a vector of this type.
4112 llvm::FoldingSetNodeID ID;
4113 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
4114 VectorKind::Generic);
4115 void *InsertPos = nullptr;
4116 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
4117 return QualType(VTP, 0);
4118
4119 // If the element type isn't canonical, this won't be a canonical type either,
4120 // so fill in the canonical type field.
4121 QualType Canonical;
4122 if (!vecType.isCanonical()) {
4123 Canonical = getExtVectorType(vecType: getCanonicalType(T: vecType), NumElts);
4124
4125 // Get the new insert position for the node we care about.
4126 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4127 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4128 }
4129 auto *New = new (*this, alignof(ExtVectorType))
4130 ExtVectorType(vecType, NumElts, Canonical);
4131 VectorTypes.InsertNode(New, InsertPos);
4132 Types.push_back(New);
4133 return QualType(New, 0);
4134}
4135
4136QualType
4137ASTContext::getDependentSizedExtVectorType(QualType vecType,
4138 Expr *SizeExpr,
4139 SourceLocation AttrLoc) const {
4140 llvm::FoldingSetNodeID ID;
4141 DependentSizedExtVectorType::Profile(ID, Context: *this, ElementType: getCanonicalType(T: vecType),
4142 SizeExpr);
4143
4144 void *InsertPos = nullptr;
4145 DependentSizedExtVectorType *Canon
4146 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4147 DependentSizedExtVectorType *New;
4148 if (Canon) {
4149 // We already have a canonical version of this array type; use it as
4150 // the canonical type for a newly-built type.
4151 New = new (*this, alignof(DependentSizedExtVectorType))
4152 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr,
4153 AttrLoc);
4154 } else {
4155 QualType CanonVecTy = getCanonicalType(T: vecType);
4156 if (CanonVecTy == vecType) {
4157 New = new (*this, alignof(DependentSizedExtVectorType))
4158 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc);
4159
4160 DependentSizedExtVectorType *CanonCheck
4161 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
4162 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
4163 (void)CanonCheck;
4164 DependentSizedExtVectorTypes.InsertNode(N: New, InsertPos);
4165 } else {
4166 QualType CanonExtTy = getDependentSizedExtVectorType(vecType: CanonVecTy, SizeExpr,
4167 AttrLoc: SourceLocation());
4168 New = new (*this, alignof(DependentSizedExtVectorType))
4169 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc);
4170 }
4171 }
4172
4173 Types.push_back(New);
4174 return QualType(New, 0);
4175}
4176
4177QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
4178 unsigned NumColumns) const {
4179 llvm::FoldingSetNodeID ID;
4180 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
4181 Type::ConstantMatrix);
4182
4183 assert(MatrixType::isValidElementType(ElementTy) &&
4184 "need a valid element type");
4185 assert(ConstantMatrixType::isDimensionValid(NumRows) &&
4186 ConstantMatrixType::isDimensionValid(NumColumns) &&
4187 "need valid matrix dimensions");
4188 void *InsertPos = nullptr;
4189 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
4190 return QualType(MTP, 0);
4191
4192 QualType Canonical;
4193 if (!ElementTy.isCanonical()) {
4194 Canonical =
4195 getConstantMatrixType(ElementTy: getCanonicalType(T: ElementTy), NumRows, NumColumns);
4196
4197 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4198 assert(!NewIP && "Matrix type shouldn't already exist in the map");
4199 (void)NewIP;
4200 }
4201
4202 auto *New = new (*this, alignof(ConstantMatrixType))
4203 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
4204 MatrixTypes.InsertNode(N: New, InsertPos);
4205 Types.push_back(New);
4206 return QualType(New, 0);
4207}
4208
4209QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
4210 Expr *RowExpr,
4211 Expr *ColumnExpr,
4212 SourceLocation AttrLoc) const {
4213 QualType CanonElementTy = getCanonicalType(T: ElementTy);
4214 llvm::FoldingSetNodeID ID;
4215 DependentSizedMatrixType::Profile(ID, Context: *this, ElementType: CanonElementTy, RowExpr,
4216 ColumnExpr);
4217
4218 void *InsertPos = nullptr;
4219 DependentSizedMatrixType *Canon =
4220 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4221
4222 if (!Canon) {
4223 Canon = new (*this, alignof(DependentSizedMatrixType))
4224 DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
4225 ColumnExpr, AttrLoc);
4226#ifndef NDEBUG
4227 DependentSizedMatrixType *CanonCheck =
4228 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
4229 assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
4230#endif
4231 DependentSizedMatrixTypes.InsertNode(N: Canon, InsertPos);
4232 Types.push_back(Canon);
4233 }
4234
  // We already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);
4241
4242 // Use Canon as the canonical type for newly-built type.
4243 DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
4244 DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
4245 ColumnExpr, AttrLoc);
4246 Types.push_back(New);
4247 return QualType(New, 0);
4248}
4249
4250QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
4251 Expr *AddrSpaceExpr,
4252 SourceLocation AttrLoc) const {
4253 assert(AddrSpaceExpr->isInstantiationDependent());
4254
4255 QualType canonPointeeType = getCanonicalType(T: PointeeType);
4256
4257 void *insertPos = nullptr;
4258 llvm::FoldingSetNodeID ID;
4259 DependentAddressSpaceType::Profile(ID, Context: *this, PointeeType: canonPointeeType,
4260 AddrSpaceExpr);
4261
4262 DependentAddressSpaceType *canonTy =
4263 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
4264
4265 if (!canonTy) {
4266 canonTy = new (*this, alignof(DependentAddressSpaceType))
4267 DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
4268 AttrLoc);
4269 DependentAddressSpaceTypes.InsertNode(N: canonTy, InsertPos: insertPos);
4270 Types.push_back(canonTy);
4271 }
4272
4273 if (canonPointeeType == PointeeType &&
4274 canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
4275 return QualType(canonTy, 0);
4276
4277 auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
4278 DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
4279 AddrSpaceExpr, AttrLoc);
4280 Types.push_back(Elt: sugaredType);
4281 return QualType(sugaredType, 0);
4282}
4283
4284/// Determine whether \p T is canonical as the result type of a function.
4285static bool isCanonicalResultType(QualType T) {
4286 return T.isCanonical() &&
4287 (T.getObjCLifetime() == Qualifiers::OCL_None ||
4288 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
4289}
4290
4291/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
4292QualType
4293ASTContext::getFunctionNoProtoType(QualType ResultTy,
4294 const FunctionType::ExtInfo &Info) const {
4295 // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
4296 // functionality creates a function without a prototype regardless of
4297 // language mode (so it makes them even in C++). Once the rewriter has been
4298 // fixed, this assertion can be enabled again.
4299 //assert(!LangOpts.requiresStrictPrototypes() &&
4300 // "strict prototypes are disabled");
4301
4302 // Unique functions, to guarantee there is only one function of a particular
4303 // structure.
4304 llvm::FoldingSetNodeID ID;
4305 FunctionNoProtoType::Profile(ID, ResultType: ResultTy, Info);
4306
4307 void *InsertPos = nullptr;
4308 if (FunctionNoProtoType *FT =
4309 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
4310 return QualType(FT, 0);
4311
4312 QualType Canonical;
4313 if (!isCanonicalResultType(T: ResultTy)) {
4314 Canonical =
4315 getFunctionNoProtoType(ResultTy: getCanonicalFunctionResultType(ResultType: ResultTy), Info);
4316
4317 // Get the new insert position for the node we care about.
4318 FunctionNoProtoType *NewIP =
4319 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
4320 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4321 }
4322
4323 auto *New = new (*this, alignof(FunctionNoProtoType))
4324 FunctionNoProtoType(ResultTy, Canonical, Info);
4325 Types.push_back(New);
4326 FunctionNoProtoTypes.InsertNode(N: New, InsertPos);
4327 return QualType(New, 0);
4328}
4329
4330CanQualType
4331ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
4332 CanQualType CanResultType = getCanonicalType(T: ResultType);
4333
4334 // Canonical result types do not have ARC lifetime qualifiers.
4335 if (CanResultType.getQualifiers().hasObjCLifetime()) {
4336 Qualifiers Qs = CanResultType.getQualifiers();
4337 Qs.removeObjCLifetime();
4338 return CanQualType::CreateUnsafe(
4339 Other: getQualifiedType(T: CanResultType.getUnqualifiedType(), Qs));
4340 }
4341
4342 return CanResultType;
4343}
4344
4345static bool isCanonicalExceptionSpecification(
4346 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
4347 if (ESI.Type == EST_None)
4348 return true;
4349 if (!NoexceptInType)
4350 return false;
4351
4352 // C++17 onwards: exception specification is part of the type, as a simple
4353 // boolean "can this function type throw".
4354 if (ESI.Type == EST_BasicNoexcept)
4355 return true;
4356
4357 // A noexcept(expr) specification is (possibly) canonical if expr is
4358 // value-dependent.
4359 if (ESI.Type == EST_DependentNoexcept)
4360 return true;
4361
4362 // A dynamic exception specification is canonical if it only contains pack
4363 // expansions (so we can't tell whether it's non-throwing) and all its
4364 // contained types are canonical.
4365 if (ESI.Type == EST_Dynamic) {
4366 bool AnyPackExpansions = false;
4367 for (QualType ET : ESI.Exceptions) {
4368 if (!ET.isCanonical())
4369 return false;
4370 if (ET->getAs<PackExpansionType>())
4371 AnyPackExpansions = true;
4372 }
4373 return AnyPackExpansions;
4374 }
4375
4376 return false;
4377}
4378
4379QualType ASTContext::getFunctionTypeInternal(
4380 QualType ResultTy, ArrayRef<QualType> ArgArray,
4381 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
4382 size_t NumArgs = ArgArray.size();
4383
4384 // Unique functions, to guarantee there is only one function of a particular
4385 // structure.
4386 llvm::FoldingSetNodeID ID;
4387 FunctionProtoType::Profile(ID, Result: ResultTy, ArgTys: ArgArray.begin(), NumArgs, EPI,
4388 Context: *this, Canonical: true);
4389
4390 QualType Canonical;
4391 bool Unique = false;
4392
4393 void *InsertPos = nullptr;
4394 if (FunctionProtoType *FPT =
4395 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4396 QualType Existing = QualType(FPT, 0);
4397
4398 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
4399 // it so long as our exception specification doesn't contain a dependent
4400 // noexcept expression, or we're just looking for a canonical type.
4401 // Otherwise, we're going to need to create a type
4402 // sugar node to hold the concrete expression.
4403 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
4404 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
4405 return Existing;
4406
4407 // We need a new type sugar node for this one, to hold the new noexcept
4408 // expression. We do no canonicalization here, but that's OK since we don't
4409 // expect to see the same noexcept expression much more than once.
4410 Canonical = getCanonicalType(T: Existing);
4411 Unique = true;
4412 }
4413
4414 bool NoexceptInType = getLangOpts().CPlusPlus17;
4415 bool IsCanonicalExceptionSpec =
4416 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
4417
4418 // Determine whether the type being created is already canonical or not.
4419 bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
4420 isCanonicalResultType(T: ResultTy) && !EPI.HasTrailingReturn;
4421 for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
4422 if (!ArgArray[i].isCanonicalAsParam())
4423 isCanonical = false;
4424
4425 if (OnlyWantCanonical)
4426 assert(isCanonical &&
4427 "given non-canonical parameters constructing canonical type");
4428
  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
4437
4438 llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
4439 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
4440 CanonicalEPI.HasTrailingReturn = false;
4441
4442 if (IsCanonicalExceptionSpec) {
4443 // Exception spec is already OK.
4444 } else if (NoexceptInType) {
4445 switch (EPI.ExceptionSpec.Type) {
4446 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
4447 // We don't know yet. It shouldn't matter what we pick here; no-one
4448 // should ever look at this.
4449 [[fallthrough]];
4450 case EST_None: case EST_MSAny: case EST_NoexceptFalse:
4451 CanonicalEPI.ExceptionSpec.Type = EST_None;
4452 break;
4453
4454 // A dynamic exception specification is almost always "not noexcept",
4455 // with the exception that a pack expansion might expand to no types.
4456 case EST_Dynamic: {
4457 bool AnyPacks = false;
4458 for (QualType ET : EPI.ExceptionSpec.Exceptions) {
4459 if (ET->getAs<PackExpansionType>())
4460 AnyPacks = true;
4461 ExceptionTypeStorage.push_back(getCanonicalType(ET));
4462 }
4463 if (!AnyPacks)
4464 CanonicalEPI.ExceptionSpec.Type = EST_None;
4465 else {
4466 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
4467 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
4468 }
4469 break;
4470 }
4471
4472 case EST_DynamicNone:
4473 case EST_BasicNoexcept:
4474 case EST_NoexceptTrue:
4475 case EST_NoThrow:
4476 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
4477 break;
4478
4479 case EST_DependentNoexcept:
4480 llvm_unreachable("dependent noexcept is already canonical");
4481 }
4482 } else {
4483 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
4484 }
4485
    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
4495 }
4496
4497 // Compute the needed size to hold this FunctionProtoType and the
4498 // various trailing objects.
4499 auto ESH = FunctionProtoType::getExceptionSpecSize(
4500 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
4501 size_t Size = FunctionProtoType::totalSizeToAlloc<
4502 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
4503 FunctionType::FunctionTypeArmAttributes, FunctionType::ExceptionType,
4504 Expr *, FunctionDecl *, FunctionProtoType::ExtParameterInfo, Qualifiers>(
4505 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
4506 EPI.requiresFunctionProtoTypeArmAttributes(), ESH.NumExceptionType,
4507 ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
4508 EPI.ExtParameterInfos ? NumArgs : 0,
4509 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
4510
  auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType));
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
4518}
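
// Usage sketch for the function above (illustrative only; assumes an
// ASTContext &Ctx is in scope; non-canonical requests normally arrive
// through the ASTContext::getFunctionType wrapper):
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType Params[] = {Ctx.FloatTy};
//   QualType Fn = Ctx.getFunctionType(Ctx.IntTy, Params, EPI); // int(float)
//   // The FoldingSet lookup guarantees an equivalent signature yields the
//   // same uniqued node:
//   assert(Fn == Ctx.getFunctionType(Ctx.IntTy, Params, EPI));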
4519
4520QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
4521 llvm::FoldingSetNodeID ID;
4522 PipeType::Profile(ID, T, isRead: ReadOnly);
4523
4524 void *InsertPos = nullptr;
4525 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
4526 return QualType(PT, 0);
4527
4528 // If the pipe element type isn't canonical, this won't be a canonical type
4529 // either, so fill in the canonical type field.
4530 QualType Canonical;
4531 if (!T.isCanonical()) {
4532 Canonical = getPipeType(T: getCanonicalType(T), ReadOnly);
4533
4534 // Get the new insert position for the node we care about.
4535 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
4536 assert(!NewIP && "Shouldn't be in the map!");
4537 (void)NewIP;
4538 }
4539 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly);
4540 Types.push_back(New);
4541 PipeTypes.InsertNode(N: New, InsertPos);
4542 return QualType(New, 0);
4543}
4544
QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  return LangOpts.OpenCL ? getAddrSpaceQualType(Ty, LangAS::opencl_constant)
                         : Ty;
}

QualType ASTContext::getReadPipeType(QualType T) const {
  return getPipeType(T, /*ReadOnly=*/true);
}

QualType ASTContext::getWritePipeType(QualType T) const {
  return getPipeType(T, /*ReadOnly=*/false);
}
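
// Usage sketch for getReadPipeType/getWritePipeType (illustrative only;
// assumes an ASTContext &Ctx and an OpenCL element type QualType ElemTy are
// in scope):
//   QualType RPipe = Ctx.getReadPipeType(ElemTy);  // getPipeType(ElemTy, true)
//   QualType WPipe = Ctx.getWritePipeType(ElemTy); // getPipeType(ElemTy, false)
//   // Read-only and write-only pipes are distinct, separately uniqued types:
//   assert(RPipe != WPipe);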
4558
4559QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const {
4560 llvm::FoldingSetNodeID ID;
4561 BitIntType::Profile(ID, IsUnsigned, NumBits);
4562
4563 void *InsertPos = nullptr;
4564 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4565 return QualType(EIT, 0);
4566
  auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits);
  BitIntTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
4571}
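
// Usage sketch for getBitIntType (illustrative only; assumes an ASTContext
// &Ctx is in scope):
//   QualType U8  = Ctx.getBitIntType(/*IsUnsigned=*/true, /*NumBits=*/8);
//   QualType S24 = Ctx.getBitIntType(/*IsUnsigned=*/false, /*NumBits=*/24);
//   // One node per (signedness, width) pair, courtesy of the FoldingSet:
//   assert(U8 == Ctx.getBitIntType(true, 8) && U8 != S24);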
4572
4573QualType ASTContext::getDependentBitIntType(bool IsUnsigned,
4574 Expr *NumBitsExpr) const {
4575 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
4576 llvm::FoldingSetNodeID ID;
4577 DependentBitIntType::Profile(ID, Context: *this, IsUnsigned, NumBitsExpr);
4578
4579 void *InsertPos = nullptr;
4580 if (DependentBitIntType *Existing =
4581 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos))
4582 return QualType(Existing, 0);
4583
4584 auto *New = new (*this, alignof(DependentBitIntType))
4585 DependentBitIntType(IsUnsigned, NumBitsExpr);
4586 DependentBitIntTypes.InsertNode(N: New, InsertPos);
4587
4588 Types.push_back(New);
4589 return QualType(New, 0);
4590}
4591
4592#ifndef NDEBUG
4593static bool NeedsInjectedClassNameType(const RecordDecl *D) {
4594 if (!isa<CXXRecordDecl>(Val: D)) return false;
4595 const auto *RD = cast<CXXRecordDecl>(Val: D);
4596 if (isa<ClassTemplatePartialSpecializationDecl>(Val: RD))
4597 return true;
4598 if (RD->getDescribedClassTemplate() &&
4599 !isa<ClassTemplateSpecializationDecl>(Val: RD))
4600 return true;
4601 return false;
4602}
4603#endif
4604
4605/// getInjectedClassNameType - Return the unique reference to the
4606/// injected class name type for the specified templated declaration.
4607QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
4608 QualType TST) const {
4609 assert(NeedsInjectedClassNameType(Decl));
4610 if (Decl->TypeForDecl) {
4611 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4612 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
4613 assert(PrevDecl->TypeForDecl && "previous declaration has no type");
4614 Decl->TypeForDecl = PrevDecl->TypeForDecl;
4615 assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
4616 } else {
4617 Type *newType = new (*this, alignof(InjectedClassNameType))
4618 InjectedClassNameType(Decl, TST);
4619 Decl->TypeForDecl = newType;
4620 Types.push_back(Elt: newType);
4621 }
4622 return QualType(Decl->TypeForDecl, 0);
4623}
4624
4625/// getTypeDeclType - Return the unique reference to the type for the
4626/// specified type declaration.
4627QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
4628 assert(Decl && "Passed null for Decl param");
4629 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
4630
4631 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Val: Decl))
4632 return getTypedefType(Decl: Typedef);
4633
4634 assert(!isa<TemplateTypeParmDecl>(Decl) &&
4635 "Template type parameter types are always available.");
4636
4637 if (const auto *Record = dyn_cast<RecordDecl>(Val: Decl)) {
4638 assert(Record->isFirstDecl() && "struct/union has previous declaration");
4639 assert(!NeedsInjectedClassNameType(Record));
4640 return getRecordType(Decl: Record);
4641 } else if (const auto *Enum = dyn_cast<EnumDecl>(Val: Decl)) {
4642 assert(Enum->isFirstDecl() && "enum has previous declaration");
4643 return getEnumType(Decl: Enum);
4644 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Val: Decl)) {
4645 return getUnresolvedUsingType(Decl: Using);
4646 } else
4647 llvm_unreachable("TypeDecl without a type?");
4648
4649 return QualType(Decl->TypeForDecl, 0);
4650}
4651
4652/// getTypedefType - Return the unique reference to the type for the
4653/// specified typedef name decl.
4654QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
4655 QualType Underlying) const {
4656 if (!Decl->TypeForDecl) {
4657 if (Underlying.isNull())
4658 Underlying = Decl->getUnderlyingType();
4659 auto *NewType = new (*this, alignof(TypedefType)) TypedefType(
4660 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying));
4661 Decl->TypeForDecl = NewType;
4662 Types.push_back(Elt: NewType);
4663 return QualType(NewType, 0);
4664 }
4665 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying)
4666 return QualType(Decl->TypeForDecl, 0);
4667 assert(hasSameType(Decl->getUnderlyingType(), Underlying));
4668
4669 llvm::FoldingSetNodeID ID;
4670 TypedefType::Profile(ID, Decl, Underlying);
4671
4672 void *InsertPos = nullptr;
4673 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) {
4674 assert(!T->typeMatchesDecl() &&
4675 "non-divergent case should be handled with TypeDecl");
4676 return QualType(T, 0);
4677 }
4678
4679 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true),
4680 alignof(TypedefType));
4681 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying,
4682 getCanonicalType(Underlying));
4683 TypedefTypes.InsertNode(NewType, InsertPos);
4684 Types.push_back(Elt: NewType);
4685 return QualType(NewType, 0);
4686}
4687
4688QualType ASTContext::getUsingType(const UsingShadowDecl *Found,
4689 QualType Underlying) const {
4690 llvm::FoldingSetNodeID ID;
4691 UsingType::Profile(ID, Found, Underlying);
4692
4693 void *InsertPos = nullptr;
4694 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos))
4695 return QualType(T, 0);
4696
4697 const Type *TypeForDecl =
4698 cast<TypeDecl>(Val: Found->getTargetDecl())->getTypeForDecl();
4699
4700 assert(!Underlying.hasLocalQualifiers());
4701 QualType Canon = Underlying->getCanonicalTypeInternal();
4702 assert(TypeForDecl->getCanonicalTypeInternal() == Canon);
4703
4704 if (Underlying.getTypePtr() == TypeForDecl)
4705 Underlying = QualType();
4706 void *Mem =
4707 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()),
4708 alignof(UsingType));
4709 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon);
4710 Types.push_back(NewType);
4711 UsingTypes.InsertNode(N: NewType, InsertPos);
4712 return QualType(NewType, 0);
4713}
4714
4715QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
4716 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4717
4718 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
4719 if (PrevDecl->TypeForDecl)
4720 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4721
4722 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl);
4723 Decl->TypeForDecl = newType;
4724 Types.push_back(newType);
4725 return QualType(newType, 0);
4726}
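
// Usage sketch for getRecordType (illustrative only; assumes an ASTContext
// &Ctx and a RecordDecl *RD are in scope):
//   QualType T1 = Ctx.getRecordType(RD);
//   // The result is cached in RD->TypeForDecl, so repeated calls return the
//   // same uniqued node:
//   assert(T1 == Ctx.getRecordType(RD));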
4727
4728QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
4729 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
4730
4731 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
4732 if (PrevDecl->TypeForDecl)
4733 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
4734
4735 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl);
4736 Decl->TypeForDecl = newType;
4737 Types.push_back(newType);
4738 return QualType(newType, 0);
4739}
4740
4741QualType ASTContext::getUnresolvedUsingType(
4742 const UnresolvedUsingTypenameDecl *Decl) const {
4743 if (Decl->TypeForDecl)
4744 return QualType(Decl->TypeForDecl, 0);
4745
4746 if (const UnresolvedUsingTypenameDecl *CanonicalDecl =
4747 Decl->getCanonicalDecl())
4748 if (CanonicalDecl->TypeForDecl)
4749 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0);
4750
4751 Type *newType =
4752 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl);
4753 Decl->TypeForDecl = newType;
4754 Types.push_back(Elt: newType);
4755 return QualType(newType, 0);
4756}
4757
4758QualType ASTContext::getAttributedType(attr::Kind attrKind,
4759 QualType modifiedType,
4760 QualType equivalentType) const {
4761 llvm::FoldingSetNodeID id;
4762 AttributedType::Profile(ID&: id, attrKind, modified: modifiedType, equivalent: equivalentType);
4763
4764 void *insertPos = nullptr;
4765 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(ID: id, InsertPos&: insertPos);
4766 if (type) return QualType(type, 0);
4767
4768 QualType canon = getCanonicalType(T: equivalentType);
4769 type = new (*this, alignof(AttributedType))
4770 AttributedType(canon, attrKind, modifiedType, equivalentType);
4771
4772 Types.push_back(type);
4773 AttributedTypes.InsertNode(N: type, InsertPos: insertPos);
4774
4775 return QualType(type, 0);
4776}
4777
4778QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
4779 QualType Wrapped) {
4780 llvm::FoldingSetNodeID ID;
4781 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
4782
4783 void *InsertPos = nullptr;
4784 BTFTagAttributedType *Ty =
4785 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
4786 if (Ty)
4787 return QualType(Ty, 0);
4788
4789 QualType Canon = getCanonicalType(T: Wrapped);
4790 Ty = new (*this, alignof(BTFTagAttributedType))
4791 BTFTagAttributedType(Canon, Wrapped, BTFAttr);
4792
4793 Types.push_back(Ty);
4794 BTFTagAttributedTypes.InsertNode(N: Ty, InsertPos);
4795
4796 return QualType(Ty, 0);
4797}
4798
4799/// Retrieve a substitution-result type.
4800QualType ASTContext::getSubstTemplateTypeParmType(
4801 QualType Replacement, Decl *AssociatedDecl, unsigned Index,
4802 std::optional<unsigned> PackIndex) const {
4803 llvm::FoldingSetNodeID ID;
4804 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index,
4805 PackIndex);
4806 void *InsertPos = nullptr;
4807 SubstTemplateTypeParmType *SubstParm =
4808 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4809
4810 if (!SubstParm) {
4811 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>(
4812 !Replacement.isCanonical()),
4813 alignof(SubstTemplateTypeParmType));
4814 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl,
4815 Index, PackIndex);
4816 Types.push_back(SubstParm);
4817 SubstTemplateTypeParmTypes.InsertNode(N: SubstParm, InsertPos);
4818 }
4819
4820 return QualType(SubstParm, 0);
4821}
4822
/// Retrieve a type that records the substitution of a template type
/// parameter pack with the given argument pack.
4824QualType
4825ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl,
4826 unsigned Index, bool Final,
4827 const TemplateArgument &ArgPack) {
4828#ifndef NDEBUG
4829 for (const auto &P : ArgPack.pack_elements())
4830 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
4831#endif
4832
4833 llvm::FoldingSetNodeID ID;
4834 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final,
4835 ArgPack);
4836 void *InsertPos = nullptr;
4837 if (SubstTemplateTypeParmPackType *SubstParm =
4838 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4839 return QualType(SubstParm, 0);
4840
4841 QualType Canon;
4842 {
4843 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(Arg: ArgPack);
4844 if (!AssociatedDecl->isCanonicalDecl() ||
4845 !CanonArgPack.structurallyEquals(Other: ArgPack)) {
4846 Canon = getSubstTemplateTypeParmPackType(
4847 AssociatedDecl: AssociatedDecl->getCanonicalDecl(), Index, Final, ArgPack: CanonArgPack);
4848 [[maybe_unused]] const auto *Nothing =
4849 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4850 assert(!Nothing);
4851 }
4852 }
4853
4854 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType))
4855 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final,
4856 ArgPack);
4857 Types.push_back(SubstParm);
4858 SubstTemplateTypeParmPackTypes.InsertNode(N: SubstParm, InsertPos);
4859 return QualType(SubstParm, 0);
4860}
4861
4862/// Retrieve the template type parameter type for a template
4863/// parameter or parameter pack with the given depth, index, and (optionally)
4864/// name.
4865QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4866 bool ParameterPack,
4867 TemplateTypeParmDecl *TTPDecl) const {
4868 llvm::FoldingSetNodeID ID;
4869 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4870 void *InsertPos = nullptr;
4871 TemplateTypeParmType *TypeParm
4872 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4873
4874 if (TypeParm)
4875 return QualType(TypeParm, 0);
4876
4877 if (TTPDecl) {
4878 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4879 TypeParm = new (*this, alignof(TemplateTypeParmType))
4880 TemplateTypeParmType(TTPDecl, Canon);
4881
4882 TemplateTypeParmType *TypeCheck
4883 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4884 assert(!TypeCheck && "Template type parameter canonical type broken");
4885 (void)TypeCheck;
4886 } else
4887 TypeParm = new (*this, alignof(TemplateTypeParmType))
4888 TemplateTypeParmType(Depth, Index, ParameterPack);
4889
  Types.push_back(TypeParm);
  TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);

  return QualType(TypeParm, 0);
4894}
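
// Usage sketch for getTemplateTypeParmType (illustrative only; assumes an
// ASTContext &Ctx is in scope):
//   // Without a TemplateTypeParmDecl the node is identified purely by its
//   // depth, index, and pack-ness, and is its own canonical type; passing a
//   // declaration produces a sugared node whose canonical type is the
//   // decl-less form built above.
//   QualType Canon = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
//                                                /*ParameterPack=*/false);
//   assert(Canon.isCanonical());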
4895
4896TypeSourceInfo *
4897ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4898 SourceLocation NameLoc,
4899 const TemplateArgumentListInfo &Args,
4900 QualType Underlying) const {
4901 assert(!Name.getAsDependentTemplateName() &&
4902 "No dependent template names here!");
4903 QualType TST =
4904 getTemplateSpecializationType(T: Name, Args: Args.arguments(), Canon: Underlying);
4905
4906 TypeSourceInfo *DI = CreateTypeSourceInfo(T: TST);
4907 TemplateSpecializationTypeLoc TL =
4908 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4909 TL.setTemplateKeywordLoc(SourceLocation());
4910 TL.setTemplateNameLoc(NameLoc);
4911 TL.setLAngleLoc(Args.getLAngleLoc());
4912 TL.setRAngleLoc(Args.getRAngleLoc());
4913 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4914 TL.setArgLocInfo(i, AI: Args[i].getLocInfo());
4915 return DI;
4916}
4917
4918QualType
4919ASTContext::getTemplateSpecializationType(TemplateName Template,
4920 ArrayRef<TemplateArgumentLoc> Args,
4921 QualType Underlying) const {
4922 assert(!Template.getAsDependentTemplateName() &&
4923 "No dependent template names here!");
4924
4925 SmallVector<TemplateArgument, 4> ArgVec;
4926 ArgVec.reserve(N: Args.size());
4927 for (const TemplateArgumentLoc &Arg : Args)
4928 ArgVec.push_back(Elt: Arg.getArgument());
4929
4930 return getTemplateSpecializationType(T: Template, Args: ArgVec, Canon: Underlying);
4931}
4932
#ifndef NDEBUG
static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
  for (const TemplateArgument &Arg : Args)
    if (Arg.isPackExpansion())
      return true;

  return false;
}
#endif
4942
4943QualType
4944ASTContext::getTemplateSpecializationType(TemplateName Template,
4945 ArrayRef<TemplateArgument> Args,
4946 QualType Underlying) const {
4947 assert(!Template.getAsDependentTemplateName() &&
4948 "No dependent template names here!");
4949 // Look through qualified template names.
4950 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4951 Template = QTN->getUnderlyingTemplate();
4952
4953 const auto *TD = Template.getAsTemplateDecl();
4954 bool IsTypeAlias = TD && TD->isTypeAlias();
4955 QualType CanonType;
4956 if (!Underlying.isNull())
4957 CanonType = getCanonicalType(T: Underlying);
4958 else {
4959 // We can get here with an alias template when the specialization contains
4960 // a pack expansion that does not match up with a parameter pack.
4961 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) &&
4962 "Caller must compute aliased type");
4963 IsTypeAlias = false;
4964 CanonType = getCanonicalTemplateSpecializationType(T: Template, Args);
4965 }
4966
4967 // Allocate the (non-canonical) template specialization type, but don't
4968 // try to unique it: these types typically have location information that
4969 // we don't unique and don't want to lose.
4970 void *Mem = Allocate(Size: sizeof(TemplateSpecializationType) +
4971 sizeof(TemplateArgument) * Args.size() +
4972 (IsTypeAlias ? sizeof(QualType) : 0),
4973 Align: alignof(TemplateSpecializationType));
4974 auto *Spec
4975 = new (Mem) TemplateSpecializationType(Template, Args, CanonType,
4976 IsTypeAlias ? Underlying : QualType());
4977
4978 Types.push_back(Spec);
4979 return QualType(Spec, 0);
4980}
4981
4982QualType ASTContext::getCanonicalTemplateSpecializationType(
4983 TemplateName Template, ArrayRef<TemplateArgument> Args) const {
4984 assert(!Template.getAsDependentTemplateName() &&
4985 "No dependent template names here!");
4986
4987 // Look through qualified template names.
4988 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4989 Template = TemplateName(QTN->getUnderlyingTemplate());
4990
4991 // Build the canonical template specialization type.
4992 TemplateName CanonTemplate = getCanonicalTemplateName(Name: Template);
4993 bool AnyNonCanonArgs = false;
4994 auto CanonArgs =
4995 ::getCanonicalTemplateArguments(C: *this, Args, AnyNonCanonArgs);
4996
4997 // Determine whether this canonical template specialization type already
4998 // exists.
4999 llvm::FoldingSetNodeID ID;
5000 TemplateSpecializationType::Profile(ID, T: CanonTemplate,
5001 Args: CanonArgs, Context: *this);
5002
5003 void *InsertPos = nullptr;
5004 TemplateSpecializationType *Spec
5005 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5006
5007 if (!Spec) {
5008 // Allocate a new canonical template specialization type.
5009 void *Mem = Allocate(Size: (sizeof(TemplateSpecializationType) +
5010 sizeof(TemplateArgument) * CanonArgs.size()),
5011 Align: alignof(TemplateSpecializationType));
5012 Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
5013 CanonArgs,
5014 QualType(), QualType());
5015 Types.push_back(Spec);
5016 TemplateSpecializationTypes.InsertNode(N: Spec, InsertPos);
5017 }
5018
5019 assert(Spec->isDependentType() &&
5020 "Non-dependent template-id type must have a canonical type");
5021 return QualType(Spec, 0);
5022}
5023
5024QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
5025 NestedNameSpecifier *NNS,
5026 QualType NamedType,
5027 TagDecl *OwnedTagDecl) const {
5028 llvm::FoldingSetNodeID ID;
5029 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl);
5030
5031 void *InsertPos = nullptr;
5032 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5033 if (T)
5034 return QualType(T, 0);
5035
5036 QualType Canon = NamedType;
5037 if (!Canon.isCanonical()) {
5038 Canon = getCanonicalType(T: NamedType);
5039 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
5040 assert(!CheckT && "Elaborated canonical type broken");
5041 (void)CheckT;
5042 }
5043
5044 void *Mem =
5045 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl),
5046 alignof(ElaboratedType));
5047 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl);
5048
5049 Types.push_back(T);
5050 ElaboratedTypes.InsertNode(N: T, InsertPos);
5051 return QualType(T, 0);
5052}
5053
5054QualType
5055ASTContext::getParenType(QualType InnerType) const {
5056 llvm::FoldingSetNodeID ID;
5057 ParenType::Profile(ID, Inner: InnerType);
5058
5059 void *InsertPos = nullptr;
5060 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5061 if (T)
5062 return QualType(T, 0);
5063
5064 QualType Canon = InnerType;
5065 if (!Canon.isCanonical()) {
5066 Canon = getCanonicalType(T: InnerType);
5067 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
5068 assert(!CheckT && "Paren canonical type broken");
5069 (void)CheckT;
5070 }
5071
5072 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon);
5073 Types.push_back(T);
5074 ParenTypes.InsertNode(N: T, InsertPos);
5075 return QualType(T, 0);
5076}
5077
5078QualType
5079ASTContext::getMacroQualifiedType(QualType UnderlyingTy,
5080 const IdentifierInfo *MacroII) const {
5081 QualType Canon = UnderlyingTy;
5082 if (!Canon.isCanonical())
5083 Canon = getCanonicalType(T: UnderlyingTy);
5084
5085 auto *newType = new (*this, alignof(MacroQualifiedType))
5086 MacroQualifiedType(UnderlyingTy, Canon, MacroII);
5087 Types.push_back(newType);
5088 return QualType(newType, 0);
5089}
5090
5091QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
5092 NestedNameSpecifier *NNS,
5093 const IdentifierInfo *Name,
5094 QualType Canon) const {
5095 if (Canon.isNull()) {
5096 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
5097 if (CanonNNS != NNS)
5098 Canon = getDependentNameType(Keyword, NNS: CanonNNS, Name);
5099 }
5100
5101 llvm::FoldingSetNodeID ID;
5102 DependentNameType::Profile(ID, Keyword, NNS, Name);
5103
5104 void *InsertPos = nullptr;
5105 DependentNameType *T
5106 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
5107 if (T)
5108 return QualType(T, 0);
5109
5110 T = new (*this, alignof(DependentNameType))
5111 DependentNameType(Keyword, NNS, Name, Canon);
5112 Types.push_back(T);
5113 DependentNameTypes.InsertNode(N: T, InsertPos);
5114 return QualType(T, 0);
5115}
5116
5117QualType ASTContext::getDependentTemplateSpecializationType(
5118 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
5119 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const {
5120 // TODO: avoid this copy
5121 SmallVector<TemplateArgument, 16> ArgCopy;
5122 for (unsigned I = 0, E = Args.size(); I != E; ++I)
5123 ArgCopy.push_back(Elt: Args[I].getArgument());
5124 return getDependentTemplateSpecializationType(Keyword, NNS, Name, Args: ArgCopy);
5125}
5126
5127QualType
5128ASTContext::getDependentTemplateSpecializationType(
5129 ElaboratedTypeKeyword Keyword,
5130 NestedNameSpecifier *NNS,
5131 const IdentifierInfo *Name,
5132 ArrayRef<TemplateArgument> Args) const {
5133 assert((!NNS || NNS->isDependent()) &&
5134 "nested-name-specifier must be dependent");
5135
5136 llvm::FoldingSetNodeID ID;
5137 DependentTemplateSpecializationType::Profile(ID, Context: *this, Keyword, Qualifier: NNS,
5138 Name, Args);
5139
5140 void *InsertPos = nullptr;
5141 DependentTemplateSpecializationType *T
5142 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5143 if (T)
5144 return QualType(T, 0);
5145
5146 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
5147
5148 ElaboratedTypeKeyword CanonKeyword = Keyword;
5149 if (Keyword == ElaboratedTypeKeyword::None)
5150 CanonKeyword = ElaboratedTypeKeyword::Typename;
5151
5152 bool AnyNonCanonArgs = false;
5153 auto CanonArgs =
5154 ::getCanonicalTemplateArguments(C: *this, Args, AnyNonCanonArgs);
5155
5156 QualType Canon;
5157 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
5158 Canon = getDependentTemplateSpecializationType(Keyword: CanonKeyword, NNS: CanonNNS,
5159 Name,
5160 Args: CanonArgs);
5161
5162 // Find the insert position again.
5163 [[maybe_unused]] auto *Nothing =
5164 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
5165 assert(!Nothing && "canonical type broken");
5166 }
5167
5168 void *Mem = Allocate(Size: (sizeof(DependentTemplateSpecializationType) +
5169 sizeof(TemplateArgument) * Args.size()),
5170 Align: alignof(DependentTemplateSpecializationType));
5171 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
5172 Name, Args, Canon);
5173 Types.push_back(T);
5174 DependentTemplateSpecializationTypes.InsertNode(N: T, InsertPos);
5175 return QualType(T, 0);
5176}
5177
5178TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
5179 TemplateArgument Arg;
5180 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Val: Param)) {
5181 QualType ArgType = getTypeDeclType(TTP);
5182 if (TTP->isParameterPack())
5183 ArgType = getPackExpansionType(Pattern: ArgType, NumExpansions: std::nullopt);
5184
5185 Arg = TemplateArgument(ArgType);
5186 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Val: Param)) {
5187 QualType T =
5188 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this);
5189 // For class NTTPs, ensure we include the 'const' so the type matches that
5190 // of a real template argument.
5191 // FIXME: It would be more faithful to model this as something like an
5192 // lvalue-to-rvalue conversion applied to a const-qualified lvalue.
5193 if (T->isRecordType())
5194 T.addConst();
5195 Expr *E = new (*this) DeclRefExpr(
5196 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T,
5197 Expr::getValueKindForType(T: NTTP->getType()), NTTP->getLocation());
5198
5199 if (NTTP->isParameterPack())
5200 E = new (*this)
5201 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt);
5202 Arg = TemplateArgument(E);
5203 } else {
5204 auto *TTP = cast<TemplateTemplateParmDecl>(Val: Param);
5205 if (TTP->isParameterPack())
5206 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>());
5207 else
5208 Arg = TemplateArgument(TemplateName(TTP));
5209 }
5210
5211 if (Param->isTemplateParameterPack())
5212 Arg = TemplateArgument::CreatePackCopy(Context&: *this, Args: Arg);
5213
5214 return Arg;
5215}
5216
5217void
5218ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
5219 SmallVectorImpl<TemplateArgument> &Args) {
5220 Args.reserve(N: Args.size() + Params->size());
5221
5222 for (NamedDecl *Param : *Params)
5223 Args.push_back(Elt: getInjectedTemplateArg(Param));
5224}
5225
5226QualType ASTContext::getPackExpansionType(QualType Pattern,
5227 std::optional<unsigned> NumExpansions,
5228 bool ExpectPackInType) {
5229 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) &&
5230 "Pack expansions must expand one or more parameter packs");
5231
5232 llvm::FoldingSetNodeID ID;
5233 PackExpansionType::Profile(ID, Pattern, NumExpansions);
5234
5235 void *InsertPos = nullptr;
5236 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5237 if (T)
5238 return QualType(T, 0);
5239
5240 QualType Canon;
5241 if (!Pattern.isCanonical()) {
5242 Canon = getPackExpansionType(Pattern: getCanonicalType(T: Pattern), NumExpansions,
5243 /*ExpectPackInType=*/false);
5244
5245 // Find the insert position again, in case we inserted an element into
5246 // PackExpansionTypes and invalidated our insert position.
5247 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
5248 }
5249
5250 T = new (*this, alignof(PackExpansionType))
5251 PackExpansionType(Pattern, Canon, NumExpansions);
5252 Types.push_back(T);
5253 PackExpansionTypes.InsertNode(N: T, InsertPos);
5254 return QualType(T, 0);
5255}
5256
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
                            ObjCProtocolDecl *const *RHS) {
  return DeclarationName::compare((*LHS)->getDeclName(),
                                  (*RHS)->getDeclName());
}

static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
  if (Protocols.empty()) return true;

  if (Protocols[0]->getCanonicalDecl() != Protocols[0])
    return false;

  for (unsigned i = 1; i != Protocols.size(); ++i)
    if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
        Protocols[i]->getCanonicalDecl() != Protocols[i])
      return false;
  return true;
}

static void
SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) {
  // Sort protocols, keyed by name.
  llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames);

  // Canonicalize.
  for (ObjCProtocolDecl *&P : Protocols)
    P = P->getCanonicalDecl();

  // Remove duplicates.
  auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end());
  Protocols.erase(ProtocolsEnd, Protocols.end());
}
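
// For example, a written qualifier list <P2, P1, P1> (with hypothetical
// protocols P1 and P2) canonicalizes to <P1, P2>: the protocols are sorted by
// name, replaced by their canonical declarations, and de-duplicated before
// they participate in a canonical ObjCObjectType.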
5290
5291QualType ASTContext::getObjCObjectType(QualType BaseType,
5292 ObjCProtocolDecl * const *Protocols,
5293 unsigned NumProtocols) const {
5294 return getObjCObjectType(Base: BaseType, typeArgs: {},
5295 protocols: llvm::ArrayRef(Protocols, NumProtocols),
5296 /*isKindOf=*/false);
5297}
5298
5299QualType ASTContext::getObjCObjectType(
5300 QualType baseType,
5301 ArrayRef<QualType> typeArgs,
5302 ArrayRef<ObjCProtocolDecl *> protocols,
5303 bool isKindOf) const {
5304 // If the base type is an interface and there aren't any protocols or
5305 // type arguments to add, then the interface type will do just fine.
5306 if (typeArgs.empty() && protocols.empty() && !isKindOf &&
5307 isa<ObjCInterfaceType>(Val: baseType))
5308 return baseType;
5309
5310 // Look in the folding set for an existing type.
5311 llvm::FoldingSetNodeID ID;
5312 ObjCObjectTypeImpl::Profile(ID, Base: baseType, typeArgs, protocols, isKindOf);
5313 void *InsertPos = nullptr;
5314 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
5315 return QualType(QT, 0);
5316
5317 // Determine the type arguments to be used for canonicalization,
5318 // which may be explicitly specified here or written on the base
5319 // type.
5320 ArrayRef<QualType> effectiveTypeArgs = typeArgs;
5321 if (effectiveTypeArgs.empty()) {
5322 if (const auto *baseObject = baseType->getAs<ObjCObjectType>())
5323 effectiveTypeArgs = baseObject->getTypeArgs();
5324 }
5325
5326 // Build the canonical type, which has the canonical base type and a
5327 // sorted-and-uniqued list of protocols and the type arguments
5328 // canonicalized.
5329 QualType canonical;
5330 bool typeArgsAreCanonical = llvm::all_of(
5331 Range&: effectiveTypeArgs, P: [&](QualType type) { return type.isCanonical(); });
5332 bool protocolsSorted = areSortedAndUniqued(Protocols: protocols);
5333 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
5334 // Determine the canonical type arguments.
5335 ArrayRef<QualType> canonTypeArgs;
5336 SmallVector<QualType, 4> canonTypeArgsVec;
5337 if (!typeArgsAreCanonical) {
5338 canonTypeArgsVec.reserve(N: effectiveTypeArgs.size());
5339 for (auto typeArg : effectiveTypeArgs)
5340 canonTypeArgsVec.push_back(Elt: getCanonicalType(T: typeArg));
5341 canonTypeArgs = canonTypeArgsVec;
5342 } else {
5343 canonTypeArgs = effectiveTypeArgs;
5344 }
5345
5346 ArrayRef<ObjCProtocolDecl *> canonProtocols;
5347 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec;
5348 if (!protocolsSorted) {
5349 canonProtocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
5350 SortAndUniqueProtocols(Protocols&: canonProtocolsVec);
5351 canonProtocols = canonProtocolsVec;
5352 } else {
5353 canonProtocols = protocols;
5354 }
5355
5356 canonical = getObjCObjectType(baseType: getCanonicalType(T: baseType), typeArgs: canonTypeArgs,
5357 protocols: canonProtocols, isKindOf);
5358
5359 // Regenerate InsertPos.
5360 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
5361 }
5362
  unsigned size = sizeof(ObjCObjectTypeImpl);
  size += typeArgs.size() * sizeof(QualType);
  size += protocols.size() * sizeof(ObjCProtocolDecl *);
  void *mem = Allocate(size, alignof(ObjCObjectTypeImpl));
  auto *T =
      new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols,
                                   isKindOf);

  Types.push_back(T);
  ObjCObjectTypes.InsertNode(T, InsertPos);
  return QualType(T, 0);
}
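
// Usage sketch for getObjCObjectType (illustrative only; assumes an
// ASTContext &Ctx, an ObjCInterfaceDecl *NSStringDecl, and an
// ObjCProtocolDecl *CopyingProto are in scope):
//   QualType Base = Ctx.getObjCInterfaceType(NSStringDecl);
//   ObjCProtocolDecl *Protos[] = {CopyingProto};
//   QualType Qualified = Ctx.getObjCObjectType(Base, /*typeArgs=*/{}, Protos,
//                                              /*isKindOf=*/false);
//   QualType Ptr = Ctx.getObjCObjectPointerType(Qualified);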
5375
5376/// Apply Objective-C protocol qualifiers to the given type.
5377/// If this is for the canonical type of a type parameter, we can apply
5378/// protocol qualifiers on the ObjCObjectPointerType.
5379QualType
5380ASTContext::applyObjCProtocolQualifiers(QualType type,
5381 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
5382 bool allowOnPointerType) const {
5383 hasError = false;
5384
5385 if (const auto *objT = dyn_cast<ObjCTypeParamType>(Val: type.getTypePtr())) {
5386 return getObjCTypeParamType(Decl: objT->getDecl(), protocols);
5387 }
5388
5389 // Apply protocol qualifiers to ObjCObjectPointerType.
5390 if (allowOnPointerType) {
5391 if (const auto *objPtr =
5392 dyn_cast<ObjCObjectPointerType>(Val: type.getTypePtr())) {
5393 const ObjCObjectType *objT = objPtr->getObjectType();
5394 // Merge protocol lists and construct ObjCObjectType.
5395 SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
5396 protocolsVec.append(objT->qual_begin(),
5397 objT->qual_end());
5398 protocolsVec.append(in_start: protocols.begin(), in_end: protocols.end());
5399 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
5400 type = getObjCObjectType(
5401 baseType: objT->getBaseType(),
5402 typeArgs: objT->getTypeArgsAsWritten(),
5403 protocols,
5404 isKindOf: objT->isKindOfTypeAsWritten());
5405 return getObjCObjectPointerType(OIT: type);
5406 }
5407 }
5408
5409 // Apply protocol qualifiers to ObjCObjectType.
5410 if (const auto *objT = dyn_cast<ObjCObjectType>(Val: type.getTypePtr())){
5411 // FIXME: Check for protocols to which the class type is already
5412 // known to conform.
5413
5414 return getObjCObjectType(baseType: objT->getBaseType(),
5415 typeArgs: objT->getTypeArgsAsWritten(),
5416 protocols,
5417 isKindOf: objT->isKindOfTypeAsWritten());
5418 }
5419
5420 // If the canonical type is ObjCObjectType, ...
5421 if (type->isObjCObjectType()) {
5422 // Silently overwrite any existing protocol qualifiers.
5423 // TODO: determine whether that's the right thing to do.
5424
5425 // FIXME: Check for protocols to which the class type is already
5426 // known to conform.
5427 return getObjCObjectType(baseType: type, typeArgs: {}, protocols, isKindOf: false);
5428 }
5429
5430 // id<protocol-list>
5431 if (type->isObjCIdType()) {
5432 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
5433 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols,
5434 objPtr->isKindOfType());
5435 return getObjCObjectPointerType(OIT: type);
5436 }
5437
5438 // Class<protocol-list>
5439 if (type->isObjCClassType()) {
5440 const auto *objPtr = type->castAs<ObjCObjectPointerType>();
5441 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols,
5442 objPtr->isKindOfType());
5443 return getObjCObjectPointerType(OIT: type);
5444 }
5445
5446 hasError = true;
5447 return type;
5448}
5449
5450QualType
5451ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
5452 ArrayRef<ObjCProtocolDecl *> protocols) const {
5453 // Look in the folding set for an existing type.
5454 llvm::FoldingSetNodeID ID;
5455 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols);
5456 void *InsertPos = nullptr;
5457 if (ObjCTypeParamType *TypeParam =
5458 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
5459 return QualType(TypeParam, 0);
5460
  // We canonicalize to the underlying type.
  QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
  if (!protocols.empty()) {
    // Apply the protocol qualifiers.
    bool hasError;
    Canonical = getCanonicalType(applyObjCProtocolQualifiers(
        Canonical, protocols, hasError, true /*allowOnPointerType*/));
    assert(!hasError && "Error when applying protocol qualifier to bound type");
5469 }
5470
5471 unsigned size = sizeof(ObjCTypeParamType);
5472 size += protocols.size() * sizeof(ObjCProtocolDecl *);
5473 void *mem = Allocate(Size: size, Align: alignof(ObjCTypeParamType));
5474 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols);
5475
5476 Types.push_back(Elt: newType);
5477 ObjCTypeParamTypes.InsertNode(newType, InsertPos);
5478 return QualType(newType, 0);
5479}
5480
5481void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig,
5482 ObjCTypeParamDecl *New) const {
5483 New->setTypeSourceInfo(getTrivialTypeSourceInfo(T: Orig->getUnderlyingType()));
5484 // Update TypeForDecl after updating TypeSourceInfo.
5485 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl());
5486 SmallVector<ObjCProtocolDecl *, 8> protocols;
5487 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end());
5488 QualType UpdatedTy = getObjCTypeParamType(Decl: New, protocols);
5489 New->setTypeForDecl(UpdatedTy.getTypePtr());
5490}
5491
5492/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
5493/// protocol list adopt all protocols in QT's qualified-id protocol
5494/// list.
5495bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT,
5496 ObjCInterfaceDecl *IC) {
5497 if (!QT->isObjCQualifiedIdType())
5498 return false;
5499
5500 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) {
5501 // If both the right and left sides have qualifiers.
5502 for (auto *Proto : OPT->quals()) {
5503 if (!IC->ClassImplementsProtocol(Proto, false))
5504 return false;
5505 }
5506 return true;
5507 }
5508 return false;
5509}
5510
5511/// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in
5512/// QT's qualified-id protocol list adopt all protocols in IDecl's list
5513/// of protocols.
5514bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT,
5515 ObjCInterfaceDecl *IDecl) {
5516 if (!QT->isObjCQualifiedIdType())
5517 return false;
5518 const auto *OPT = QT->getAs<ObjCObjectPointerType>();
5519 if (!OPT)
5520 return false;
5521 if (!IDecl->hasDefinition())
5522 return false;
5523 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols;
5524 CollectInheritedProtocols(IDecl, InheritedProtocols);
5525 if (InheritedProtocols.empty())
5526 return false;
5527 // Check that if every protocol in list of id<plist> conforms to a protocol
5528 // of IDecl's, then bridge casting is ok.
5529 bool Conforms = false;
5530 for (auto *Proto : OPT->quals()) {
5531 Conforms = false;
5532 for (auto *PI : InheritedProtocols) {
5533 if (ProtocolCompatibleWithProtocol(Proto, PI)) {
5534 Conforms = true;
5535 break;
5536 }
5537 }
5538 if (!Conforms)
5539 break;
5540 }
5541 if (Conforms)
5542 return true;
5543
5544 for (auto *PI : InheritedProtocols) {
5545 // If both the right and left sides have qualifiers.
5546 bool Adopts = false;
5547 for (auto *Proto : OPT->quals()) {
5548 // return 'true' if 'PI' is in the inheritance hierarchy of Proto
5549 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto)))
5550 break;
5551 }
5552 if (!Adopts)
5553 return false;
5554 }
5555 return true;
5556}
5557
5558/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
5559/// the given object type.
5560QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
5561 llvm::FoldingSetNodeID ID;
5562 ObjCObjectPointerType::Profile(ID, T: ObjectT);
5563
5564 void *InsertPos = nullptr;
5565 if (ObjCObjectPointerType *QT =
5566 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
5567 return QualType(QT, 0);
5568
5569 // Find the canonical object type.
5570 QualType Canonical;
5571 if (!ObjectT.isCanonical()) {
5572 Canonical = getObjCObjectPointerType(ObjectT: getCanonicalType(T: ObjectT));
5573
5574 // Regenerate InsertPos.
5575 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
5576 }
5577
5578 // No match.
5579 void *Mem =
5580 Allocate(Size: sizeof(ObjCObjectPointerType), Align: alignof(ObjCObjectPointerType));
5581 auto *QType =
5582 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
5583
5584 Types.push_back(QType);
5585 ObjCObjectPointerTypes.InsertNode(N: QType, InsertPos);
5586 return QualType(QType, 0);
5587}
5588
5589/// getObjCInterfaceType - Return the unique reference to the type for the
5590/// specified ObjC interface decl. The list of protocols is optional.
5591QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
5592 ObjCInterfaceDecl *PrevDecl) const {
5593 if (Decl->TypeForDecl)
5594 return QualType(Decl->TypeForDecl, 0);
5595
5596 if (PrevDecl) {
5597 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
5598 Decl->TypeForDecl = PrevDecl->TypeForDecl;
5599 return QualType(PrevDecl->TypeForDecl, 0);
5600 }
5601
5602 // Prefer the definition, if there is one.
5603 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
5604 Decl = Def;
5605
5606 void *Mem = Allocate(Size: sizeof(ObjCInterfaceType), Align: alignof(ObjCInterfaceType));
5607 auto *T = new (Mem) ObjCInterfaceType(Decl);
5608 Decl->TypeForDecl = T;
5609 Types.push_back(T);
5610 return QualType(T, 0);
5611}
5612
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType ASTs (since expressions are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
/// DeclRefExprs. This doesn't affect the type checker, since it operates
/// on canonical types (which are always unique).
5618QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const {
5619 TypeOfExprType *toe;
5620 if (tofExpr->isTypeDependent()) {
5621 llvm::FoldingSetNodeID ID;
5622 DependentTypeOfExprType::Profile(ID, Context: *this, E: tofExpr,
5623 IsUnqual: Kind == TypeOfKind::Unqualified);
5624
5625 void *InsertPos = nullptr;
5626 DependentTypeOfExprType *Canon =
5627 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
5628 if (Canon) {
5629 // We already have a "canonical" version of an identical, dependent
5630 // typeof(expr) type. Use that as our canonical type.
5631 toe = new (*this, alignof(TypeOfExprType))
5632 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0));
5633 } else {
5634 // Build a new, canonical typeof(expr) type.
5635 Canon = new (*this, alignof(DependentTypeOfExprType))
5636 DependentTypeOfExprType(tofExpr, Kind);
5637 DependentTypeOfExprTypes.InsertNode(N: Canon, InsertPos);
5638 toe = Canon;
5639 }
5640 } else {
5641 QualType Canonical = getCanonicalType(T: tofExpr->getType());
5642 toe = new (*this, alignof(TypeOfExprType))
5643 TypeOfExprType(tofExpr, Kind, Canonical);
5644 }
5645 Types.push_back(toe);
5646 return QualType(toe, 0);
5647}
5648
5649/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
5650/// TypeOfType nodes. The only motivation to unique these nodes would be
5651/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
5652/// an issue. This doesn't affect the type checker, since it operates
5653/// on canonical types (which are always unique).
5654QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const {
5655 QualType Canonical = getCanonicalType(T: tofType);
5656 auto *tot =
5657 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind);
5658 Types.push_back(tot);
5659 return QualType(tot, 0);
5660}
5661
5662/// getReferenceQualifiedType - Given an expr, will return the type for
5663/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
5664/// and class member access into account.
5665QualType ASTContext::getReferenceQualifiedType(const Expr *E) const {
5666 // C++11 [dcl.type.simple]p4:
5667 // [...]
5668 QualType T = E->getType();
5669 switch (E->getValueKind()) {
5670 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
5671 // type of e;
5672 case VK_XValue:
5673 return getRValueReferenceType(T);
5674 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
5675 // type of e;
5676 case VK_LValue:
5677 return getLValueReferenceType(T);
5678 // - otherwise, decltype(e) is the type of e.
5679 case VK_PRValue:
5680 return T;
5681 }
5682 llvm_unreachable("Unknown value kind");
5683}
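
// Worked example for getReferenceQualifiedType (illustrative only): for a
// variable declared as 'int n;'
//   n             is an lvalue,  so the result is int&
//   std::move(n)  is an xvalue,  so the result is int&&
//   n + 1         is a prvalue,  so the result is int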
5684
5685/// Unlike many "get<Type>" functions, we don't unique DecltypeType
5686/// nodes. This would never be helpful, since each such type has its own
5687/// expression, and would not give a significant memory saving, since there
5688/// is an Expr tree under each such type.
5689QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
5690 DecltypeType *dt;
5691
5692 // C++11 [temp.type]p2:
5693 // If an expression e involves a template parameter, decltype(e) denotes a
5694 // unique dependent type. Two such decltype-specifiers refer to the same
5695 // type only if their expressions are equivalent (14.5.6.1).
5696 if (e->isInstantiationDependent()) {
5697 llvm::FoldingSetNodeID ID;
5698 DependentDecltypeType::Profile(ID, Context: *this, E: e);
5699
5700 void *InsertPos = nullptr;
5701 DependentDecltypeType *Canon
5702 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
5703 if (!Canon) {
5704 // Build a new, canonical decltype(expr) type.
5705 Canon = new (*this, alignof(DependentDecltypeType))
5706 DependentDecltypeType(e, DependentTy);
5707 DependentDecltypeTypes.InsertNode(N: Canon, InsertPos);
5708 }
5709 dt = new (*this, alignof(DecltypeType))
5710 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0));
5711 } else {
5712 dt = new (*this, alignof(DecltypeType))
5713 DecltypeType(e, UnderlyingType, getCanonicalType(T: UnderlyingType));
5714 }
5715 Types.push_back(dt);
5716 return QualType(dt, 0);
5717}
5718
5719QualType ASTContext::getPackIndexingType(QualType Pattern, Expr *IndexExpr,
5720 bool FullySubstituted,
5721 ArrayRef<QualType> Expansions,
5722 int Index) const {
5723 QualType Canonical;
5724 if (FullySubstituted && Index != -1) {
5725 Canonical = getCanonicalType(T: Expansions[Index]);
5726 } else {
5727 llvm::FoldingSetNodeID ID;
5728 PackIndexingType::Profile(ID, Context: *this, Pattern, E: IndexExpr);
5729 void *InsertPos = nullptr;
5730 PackIndexingType *Canon =
5731 DependentPackIndexingTypes.FindNodeOrInsertPos(ID, InsertPos);
5732 if (!Canon) {
5733 void *Mem = Allocate(
5734 PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
5735 TypeAlignment);
5736 Canon = new (Mem)
5737 PackIndexingType(*this, QualType(), Pattern, IndexExpr, Expansions);
5738 DependentPackIndexingTypes.InsertNode(N: Canon, InsertPos);
5739 }
5740 Canonical = QualType(Canon, 0);
5741 }
5742
5743 void *Mem =
5744 Allocate(PackIndexingType::totalSizeToAlloc<QualType>(Expansions.size()),
5745 TypeAlignment);
5746 auto *T = new (Mem)
5747 PackIndexingType(*this, Canonical, Pattern, IndexExpr, Expansions);
5748 Types.push_back(T);
5749 return QualType(T, 0);
5750}
5751
5752/// getUnaryTransformationType - We don't unique these, since the memory
5753/// savings are minimal and these are rare.
5754QualType ASTContext::getUnaryTransformType(QualType BaseType,
5755 QualType UnderlyingType,
5756 UnaryTransformType::UTTKind Kind)
5757 const {
5758 UnaryTransformType *ut = nullptr;
5759
5760 if (BaseType->isDependentType()) {
5761 // Look in the folding set for an existing type.
5762 llvm::FoldingSetNodeID ID;
5763 DependentUnaryTransformType::Profile(ID, BaseType: getCanonicalType(T: BaseType), UKind: Kind);
5764
5765 void *InsertPos = nullptr;
5766 DependentUnaryTransformType *Canon
5767 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos);
5768
5769 if (!Canon) {
5770 // Build a new, canonical __underlying_type(type) type.
5771 Canon = new (*this, alignof(DependentUnaryTransformType))
5772 DependentUnaryTransformType(*this, getCanonicalType(T: BaseType), Kind);
5773 DependentUnaryTransformTypes.InsertNode(N: Canon, InsertPos);
5774 }
5775 ut = new (*this, alignof(UnaryTransformType))
5776 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0));
5777 } else {
5778 QualType CanonType = getCanonicalType(T: UnderlyingType);
5779 ut = new (*this, alignof(UnaryTransformType))
5780 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType);
5781 }
5782 Types.push_back(ut);
5783 return QualType(ut, 0);
5784}
5785
5786QualType ASTContext::getAutoTypeInternal(
5787 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent,
5788 bool IsPack, ConceptDecl *TypeConstraintConcept,
5789 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const {
5790 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto &&
5791 !TypeConstraintConcept && !IsDependent)
5792 return getAutoDeductType();
5793
5794 // Look in the folding set for an existing type.
5795 void *InsertPos = nullptr;
5796 llvm::FoldingSetNodeID ID;
5797 AutoType::Profile(ID, Context: *this, Deduced: DeducedType, Keyword, IsDependent,
5798 CD: TypeConstraintConcept, Arguments: TypeConstraintArgs);
5799 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
5800 return QualType(AT, 0);
5801
5802 QualType Canon;
5803 if (!IsCanon) {
5804 if (!DeducedType.isNull()) {
5805 Canon = DeducedType.getCanonicalType();
5806 } else if (TypeConstraintConcept) {
5807 bool AnyNonCanonArgs = false;
5808 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl();
5809 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments(
5810 C: *this, Args: TypeConstraintArgs, AnyNonCanonArgs);
5811 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) {
5812 Canon =
5813 getAutoTypeInternal(DeducedType: QualType(), Keyword, IsDependent, IsPack,
5814 TypeConstraintConcept: CanonicalConcept, TypeConstraintArgs: CanonicalConceptArgs, IsCanon: true);
5815 // Find the insert position again.
5816 [[maybe_unused]] auto *Nothing =
5817 AutoTypes.FindNodeOrInsertPos(ID, InsertPos);
5818 assert(!Nothing && "canonical type broken");
5819 }
5820 }
5821 }
5822
5823 void *Mem = Allocate(Size: sizeof(AutoType) +
5824 sizeof(TemplateArgument) * TypeConstraintArgs.size(),
5825 Align: alignof(AutoType));
5826 auto *AT = new (Mem) AutoType(
5827 DeducedType, Keyword,
5828 (IsDependent ? TypeDependence::DependentInstantiation
5829 : TypeDependence::None) |
5830 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None),
5831 Canon, TypeConstraintConcept, TypeConstraintArgs);
5832 Types.push_back(Elt: AT);
5833 AutoTypes.InsertNode(AT, InsertPos);
5834 return QualType(AT, 0);
5835}
5836
5837/// getAutoType - Return the uniqued reference to the 'auto' type which has been
5838/// deduced to the given type, or to the canonical undeduced 'auto' type, or the
5839/// canonical deduced-but-dependent 'auto' type.
5840QualType
5841ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
5842 bool IsDependent, bool IsPack,
5843 ConceptDecl *TypeConstraintConcept,
5844 ArrayRef<TemplateArgument> TypeConstraintArgs) const {
5845 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack");
5846 assert((!IsDependent || DeducedType.isNull()) &&
5847 "A dependent auto should be undeduced");
5848 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack,
5849 TypeConstraintConcept, TypeConstraintArgs);
5850}
5851
5852QualType ASTContext::getUnconstrainedType(QualType T) const {
5853 QualType CanonT = T.getCanonicalType();
5854
  // Remove a type-constraint from a top-level auto or decltype(auto).
  if (auto *AT = CanonT->getAs<AutoType>()) {
    if (!AT->isConstrained())
      return T;
    return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false,
                                        AT->containsUnexpandedParameterPack()),
                            T.getQualifiers());
5862 }
5863
5864 // FIXME: We only support constrained auto at the top level in the type of a
5865 // non-type template parameter at the moment. Once we lift that restriction,
5866 // we'll need to recursively build types containing auto here.
5867 assert(!CanonT->getContainedAutoType() ||
5868 !CanonT->getContainedAutoType()->isConstrained());
5869 return T;
5870}
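
// For example (illustrative only, with a hypothetical concept Sortable):
// given a type written as 'Sortable auto', getUnconstrainedType returns plain
// 'auto' with the same qualifiers, while a type with no constrained auto at
// the top level is returned unchanged.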
5871
5872/// Return the uniqued reference to the deduced template specialization type
5873/// which has been deduced to the given type, or to the canonical undeduced
5874/// such type, or the canonical deduced-but-dependent such type.
5875QualType ASTContext::getDeducedTemplateSpecializationType(
5876 TemplateName Template, QualType DeducedType, bool IsDependent) const {
5877 // Look in the folding set for an existing type.
5878 void *InsertPos = nullptr;
5879 llvm::FoldingSetNodeID ID;
5880 DeducedTemplateSpecializationType::Profile(ID, Template, Deduced: DeducedType,
5881 IsDependent);
5882 if (DeducedTemplateSpecializationType *DTST =
5883 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
5884 return QualType(DTST, 0);
5885
5886 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType))
5887 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
5888 llvm::FoldingSetNodeID TempID;
5889 DTST->Profile(TempID);
5890 assert(ID == TempID && "ID does not match");
5891 Types.push_back(DTST);
5892 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
5893 return QualType(DTST, 0);
5894}
5895
5896/// getAtomicType - Return the uniqued reference to the atomic type for
5897/// the given value type.
5898QualType ASTContext::getAtomicType(QualType T) const {
5899 // Unique atomic types, to guarantee there is only one AtomicType for a
5900 // particular value type.
5901 llvm::FoldingSetNodeID ID;
5902 AtomicType::Profile(ID, T);
5903
5904 void *InsertPos = nullptr;
5905 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
5906 return QualType(AT, 0);
5907
5908 // If the atomic value type isn't canonical, this won't be a canonical type
5909 // either, so fill in the canonical type field.
5910 QualType Canonical;
5911 if (!T.isCanonical()) {
5912 Canonical = getAtomicType(getCanonicalType(T));
5913
5914 // Get the new insert position for the node we care about.
5915 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
5916 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
5917 }
5918 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical);
5919 Types.push_back(New);
5920 AtomicTypes.InsertNode(New, InsertPos);
5921 return QualType(New, 0);
5922}
5923
5924/// getAutoDeductType - Get type pattern for deducing against 'auto'.
5925QualType ASTContext::getAutoDeductType() const {
5926 if (AutoDeductTy.isNull())
5927 AutoDeductTy = QualType(new (*this, alignof(AutoType))
5928 AutoType(QualType(), AutoTypeKeyword::Auto,
5929 TypeDependence::None, QualType(),
5930 /*concept*/ nullptr, /*args*/ {}),
5931 0);
5932 return AutoDeductTy;
5933}
5934
5935/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
5936QualType ASTContext::getAutoRRefDeductType() const {
5937 if (AutoRRefDeductTy.isNull())
5938 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
5939 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
5940 return AutoRRefDeductTy;
5941}
5942
5943/// getTagDeclType - Return the unique reference to the type for the
5944/// specified TagDecl (struct/union/class/enum) decl.
5945QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
5946 assert(Decl);
5947 // FIXME: What is the design on getTagDeclType when it requires casting
5948 // away const? mutable?
5949 return getTypeDeclType(const_cast<TagDecl*>(Decl));
5950}
5951
5952/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
5953/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
5954/// needs to agree with the definition in <stddef.h>.
5955CanQualType ASTContext::getSizeType() const {
5956 return getFromTargetType(Target->getSizeType());
5957}
5958
5959/// Return the unique signed counterpart of the integer type
5960/// corresponding to size_t.
5961CanQualType ASTContext::getSignedSizeType() const {
5962 return getFromTargetType(Target->getSignedSizeType());
5963}
5964
5965/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
5966CanQualType ASTContext::getIntMaxType() const {
5967 return getFromTargetType(Target->getIntMaxType());
5968}
5969
5970/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
5971CanQualType ASTContext::getUIntMaxType() const {
5972 return getFromTargetType(Target->getUIntMaxType());
5973}
5974
5975/// getSignedWCharType - Return the type of "signed wchar_t".
5976/// Used when in C++, as a GCC extension.
5977QualType ASTContext::getSignedWCharType() const {
5978 // FIXME: derive from "Target" ?
5979 return WCharTy;
5980}
5981
5982/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
5983/// Used when in C++, as a GCC extension.
5984QualType ASTContext::getUnsignedWCharType() const {
5985 // FIXME: derive from "Target" ?
5986 return UnsignedIntTy;
5987}
5988
5989QualType ASTContext::getIntPtrType() const {
5990 return getFromTargetType(Target->getIntPtrType());
5991}
5992
5993QualType ASTContext::getUIntPtrType() const {
5994 return getCorrespondingUnsignedType(getIntPtrType());
5995}
5996
5997/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
5998/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
5999QualType ASTContext::getPointerDiffType() const {
6000 return getFromTargetType(Target->getPtrDiffType(LangAS::Default));
6001}
6002
6003/// Return the unique unsigned counterpart of "ptrdiff_t"
6004/// integer type. The standard (C11 7.21.6.1p7) refers to this type
6005/// in the definition of %tu format specifier.
6006QualType ASTContext::getUnsignedPointerDiffType() const {
6007 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default));
6008}
6009
6010/// Return the unique type for "pid_t" defined in
6011/// <sys/types.h>. We need this to compute the correct type for vfork().
6012QualType ASTContext::getProcessIDType() const {
6013 return getFromTargetType(Target->getProcessIDType());
6014}
6015
6016//===----------------------------------------------------------------------===//
6017// Type Operators
6018//===----------------------------------------------------------------------===//
6019
6020CanQualType ASTContext::getCanonicalParamType(QualType T) const {
6021 // Push qualifiers into arrays, and then discard any remaining
6022 // qualifiers.
6023 T = getCanonicalType(T);
6024 T = getVariableArrayDecayedType(T);
6025 const Type *Ty = T.getTypePtr();
6026 QualType Result;
6027 if (isa<ArrayType>(Ty)) {
6028 Result = getArrayDecayedType(QualType(Ty, 0));
6029 } else if (isa<FunctionType>(Ty)) {
6030 Result = getPointerType(QualType(Ty, 0));
6031 } else {
6032 Result = QualType(Ty, 0);
6033 }
6034 
6035 return CanQualType::CreateUnsafe(Result);
6036}
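// For example, a parameter declared as 'const char[10]' is canonicalized here
// to 'const char *' (array-to-pointer decay, keeping the element qualifier),
// a parameter of function type 'int(int)' becomes 'int (*)(int)', and any
// remaining top-level qualifiers are discarded.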
6037
6038QualType ASTContext::getUnqualifiedArrayType(QualType type,
6039 Qualifiers &quals) {
6040 SplitQualType splitType = type.getSplitUnqualifiedType();
6041
6042 // FIXME: getSplitUnqualifiedType() actually walks all the way to
6043 // the unqualified desugared type and then drops it on the floor.
6044 // We then have to strip that sugar back off with
6045 // getUnqualifiedDesugaredType(), which is silly.
6046 const auto *AT =
6047 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
6048
6049 // If we don't have an array, just use the results in splitType.
6050 if (!AT) {
6051 quals = splitType.Quals;
6052 return QualType(splitType.Ty, 0);
6053 }
6054
6055 // Otherwise, recurse on the array's element type.
6056 QualType elementType = AT->getElementType();
6057 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
6058
6059 // If that didn't change the element type, AT has no qualifiers, so we
6060 // can just use the results in splitType.
6061 if (elementType == unqualElementType) {
6062 assert(quals.empty()); // from the recursive call
6063 quals = splitType.Quals;
6064 return QualType(splitType.Ty, 0);
6065 }
6066
6067 // Otherwise, add in the qualifiers from the outermost type, then
6068 // build the type back up.
6069 quals.addConsistentQualifiers(splitType.Quals);
6070 
6071 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
6072 return getConstantArrayType(unqualElementType, CAT->getSize(),
6073 CAT->getSizeExpr(), CAT->getSizeModifier(), /*IndexTypeQuals=*/0);
6074 }
6075 
6076 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) {
6077 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(),
6078 /*elementTypeQuals=*/0);
6079 }
6080 
6081 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) {
6082 return getVariableArrayType(unqualElementType,
6083 VAT->getSizeExpr(),
6084 VAT->getSizeModifier(),
6085 VAT->getIndexTypeCVRQualifiers(),
6086 VAT->getBracketsRange());
6087 }
6088 
6089 const auto *DSAT = cast<DependentSizedArrayType>(AT);
6090 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
6091 DSAT->getSizeModifier(), /*elementTypeQuals=*/0, SourceRange());
6092}
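// For example, given 'const volatile int[2][3]', this returns 'int[2][3]' and
// sets 'quals' to {const, volatile}: array qualifiers live on the element
// type, so they are collected from the innermost element and stripped from
// every array level while the array structure is rebuilt.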
6093
6094/// Attempt to unwrap two types that may both be array types with the same bound
6095/// (or both be array types of unknown bound) for the purpose of comparing the
6096/// cv-decomposition of two types per C++ [conv.qual].
6097///
6098/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6099/// C++20 [conv.qual], if permitted by the current language mode.
6100void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2,
6101 bool AllowPiMismatch) {
6102 while (true) {
6103 auto *AT1 = getAsArrayType(T1);
6104 if (!AT1)
6105 return;
6106 
6107 auto *AT2 = getAsArrayType(T2);
6108 if (!AT2)
6109 return;
6110
6111 // If we don't have two array types with the same constant bound nor two
6112 // incomplete array types, we've unwrapped everything we can.
6113 // C++20 also permits one type to be a constant array type and the other
6114 // to be an incomplete array type.
6115 // FIXME: Consider also unwrapping array of unknown bound and VLA.
6116 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
6117 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
6118 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) ||
6119 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
6120 isa<IncompleteArrayType>(AT2))))
6121 return;
6122 } else if (isa<IncompleteArrayType>(AT1)) {
6123 if (!(isa<IncompleteArrayType>(AT2) ||
6124 (AllowPiMismatch && getLangOpts().CPlusPlus20 &&
6125 isa<ConstantArrayType>(AT2))))
6126 return;
6127 } else {
6128 return;
6129 }
6130
6131 T1 = AT1->getElementType();
6132 T2 = AT2->getElementType();
6133 }
6134}
6135
6136/// Attempt to unwrap two types that may be similar (C++ [conv.qual]).
6137///
6138/// If T1 and T2 are both pointer types of the same kind, or both array types
6139/// with the same bound, unwraps layers from T1 and T2 until a pointer type is
6140/// unwrapped. Top-level qualifiers on T1 and T2 are ignored.
6141///
6142/// This function will typically be called in a loop that successively
6143/// "unwraps" pointer and pointer-to-member types to compare them at each
6144/// level.
6145///
6146/// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in
6147/// C++20 [conv.qual], if permitted by the current language mode.
6148///
6149/// \return \c true if a pointer type was unwrapped, \c false if we reached a
6150/// pair of types that can't be unwrapped further.
6151bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2,
6152 bool AllowPiMismatch) {
6153 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch);
6154
6155 const auto *T1PtrType = T1->getAs<PointerType>();
6156 const auto *T2PtrType = T2->getAs<PointerType>();
6157 if (T1PtrType && T2PtrType) {
6158 T1 = T1PtrType->getPointeeType();
6159 T2 = T2PtrType->getPointeeType();
6160 return true;
6161 }
6162
6163 const auto *T1MPType = T1->getAs<MemberPointerType>();
6164 const auto *T2MPType = T2->getAs<MemberPointerType>();
6165 if (T1MPType && T2MPType &&
6166 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
6167 QualType(T2MPType->getClass(), 0))) {
6168 T1 = T1MPType->getPointeeType();
6169 T2 = T2MPType->getPointeeType();
6170 return true;
6171 }
6172
6173 if (getLangOpts().ObjC) {
6174 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>();
6175 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>();
6176 if (T1OPType && T2OPType) {
6177 T1 = T1OPType->getPointeeType();
6178 T2 = T2OPType->getPointeeType();
6179 return true;
6180 }
6181 }
6182
6183 // FIXME: Block pointers, too?
6184
6185 return false;
6186}
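// Illustrative walk-through: for T1 = 'int **' and T2 = 'const int *const *',
// the first call strips one pointer level from each and returns true, a second
// call strips the next level, and a third call returns false once the
// non-pointer types 'int' and 'const int' are reached; the caller compares the
// qualifiers seen at each level per C++ [conv.qual].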
6187
6188bool ASTContext::hasSimilarType(QualType T1, QualType T2) {
6189 while (true) {
6190 Qualifiers Quals;
6191 T1 = getUnqualifiedArrayType(T1, Quals);
6192 T2 = getUnqualifiedArrayType(T2, Quals);
6193 if (hasSameType(T1, T2))
6194 return true;
6195 if (!UnwrapSimilarTypes(T1, T2))
6196 return false;
6197 }
6198}
6199
6200bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) {
6201 while (true) {
6202 Qualifiers Quals1, Quals2;
6203 T1 = getUnqualifiedArrayType(T1, Quals1);
6204 T2 = getUnqualifiedArrayType(T2, Quals2);
6205
6206 Quals1.removeCVRQualifiers();
6207 Quals2.removeCVRQualifiers();
6208 if (Quals1 != Quals2)
6209 return false;
6210
6211 if (hasSameType(T1, T2))
6212 return true;
6213
6214 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false))
6215 return false;
6216 }
6217}
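// For instance, 'const int **' and 'int *volatile *' are CVR-similar (only
// const/volatile/restrict differ level by level), whereas pointers whose
// pointees differ in a non-CVR qualifier such as an address space are not,
// since those qualifiers must match exactly at every level.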
6218
6219DeclarationNameInfo
6220ASTContext::getNameForTemplate(TemplateName Name,
6221 SourceLocation NameLoc) const {
6222 switch (Name.getKind()) {
6223 case TemplateName::QualifiedTemplate:
6224 case TemplateName::Template:
6225 // DNInfo work in progress: CHECKME: what about DNLoc?
6226 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
6227 NameLoc);
6228
6229 case TemplateName::OverloadedTemplate: {
6230 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
6231 // DNInfo work in progress: CHECKME: what about DNLoc?
6232 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
6233 }
6234
6235 case TemplateName::AssumedTemplate: {
6236 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName();
6237 return DeclarationNameInfo(Storage->getDeclName(), NameLoc);
6238 }
6239
6240 case TemplateName::DependentTemplate: {
6241 DependentTemplateName *DTN = Name.getAsDependentTemplateName();
6242 DeclarationName DName;
6243 if (DTN->isIdentifier()) {
6244 DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
6245 return DeclarationNameInfo(DName, NameLoc);
6246 } else {
6247 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
6248 // DNInfo work in progress: FIXME: source locations?
6249 DeclarationNameLoc DNLoc =
6250 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
6251 return DeclarationNameInfo(DName, NameLoc, DNLoc);
6252 }
6253 }
6254
6255 case TemplateName::SubstTemplateTemplateParm: {
6256 SubstTemplateTemplateParmStorage *subst
6257 = Name.getAsSubstTemplateTemplateParm();
6258 return DeclarationNameInfo(subst->getParameter()->getDeclName(),
6259 NameLoc);
6260 }
6261
6262 case TemplateName::SubstTemplateTemplateParmPack: {
6263 SubstTemplateTemplateParmPackStorage *subst
6264 = Name.getAsSubstTemplateTemplateParmPack();
6265 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
6266 NameLoc);
6267 }
6268 case TemplateName::UsingTemplate:
6269 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
6270 NameLoc);
6271 }
6272
6273 llvm_unreachable("bad template name kind!");
6274}
6275
6276TemplateName
6277ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
6278 switch (Name.getKind()) {
6279 case TemplateName::UsingTemplate:
6280 case TemplateName::QualifiedTemplate:
6281 case TemplateName::Template: {
6282 TemplateDecl *Template = Name.getAsTemplateDecl();
6283 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template))
6284 Template = getCanonicalTemplateTemplateParmDecl(TTP);
6285
6286 // The canonical template name is the canonical template declaration.
6287 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
6288 }
6289
6290 case TemplateName::OverloadedTemplate:
6291 case TemplateName::AssumedTemplate:
6292 llvm_unreachable("cannot canonicalize unresolved template");
6293
6294 case TemplateName::DependentTemplate: {
6295 DependentTemplateName *DTN = Name.getAsDependentTemplateName();
6296 assert(DTN && "Non-dependent template names must refer to template decls.");
6297 return DTN->CanonicalTemplateName;
6298 }
6299
6300 case TemplateName::SubstTemplateTemplateParm: {
6301 SubstTemplateTemplateParmStorage *subst
6302 = Name.getAsSubstTemplateTemplateParm();
6303 return getCanonicalTemplateName(subst->getReplacement());
6304 }
6305
6306 case TemplateName::SubstTemplateTemplateParmPack: {
6307 SubstTemplateTemplateParmPackStorage *subst =
6308 Name.getAsSubstTemplateTemplateParmPack();
6309 TemplateArgument canonArgPack =
6310 getCanonicalTemplateArgument(subst->getArgumentPack());
6311 return getSubstTemplateTemplateParmPack(
6312 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(),
6313 subst->getIndex(), subst->getFinal());
6314 }
6315 }
6316
6317 llvm_unreachable("bad template name!");
6318}
6319
6320bool ASTContext::hasSameTemplateName(const TemplateName &X,
6321 const TemplateName &Y) const {
6322 return getCanonicalTemplateName(X).getAsVoidPointer() ==
6323 getCanonicalTemplateName(Y).getAsVoidPointer();
6324}
6325
6326bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const {
6327 if (!XCE != !YCE)
6328 return false;
6329
6330 if (!XCE)
6331 return true;
6332
6333 llvm::FoldingSetNodeID XCEID, YCEID;
6334 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
6335 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true);
6336 return XCEID == YCEID;
6337}
6338
6339bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC,
6340 const TypeConstraint *YTC) const {
6341 if (!XTC != !YTC)
6342 return false;
6343
6344 if (!XTC)
6345 return true;
6346
6347 auto *NCX = XTC->getNamedConcept();
6348 auto *NCY = YTC->getNamedConcept();
6349 if (!NCX || !NCY || !isSameEntity(NCX, NCY))
6350 return false;
6351 if (XTC->getConceptReference()->hasExplicitTemplateArgs() !=
6352 YTC->getConceptReference()->hasExplicitTemplateArgs())
6353 return false;
6354 if (XTC->getConceptReference()->hasExplicitTemplateArgs())
6355 if (XTC->getConceptReference()
6356 ->getTemplateArgsAsWritten()
6357 ->NumTemplateArgs !=
6358 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs)
6359 return false;
6360
6361 // Compare slowly by profiling.
6362 //
6363 // We cannot compare the profiling results for the template
6364 // arguments here. Consider the following example in two different modules:
6365 //
6366 // template <__integer_like _Tp, C<_Tp> Sentinel>
6367 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const {
6368 // return __t;
6369 // }
6370 //
6371 // When we compare the profiling results for `C<_Tp>` across modules,
6372 // we end up comparing the type of `_Tp` in each module. The type of
6373 // `_Tp` naturally refers to a different type in each module, so the
6374 // profiling results for the template arguments cannot be compared
6375 // directly.
6376 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(),
6377 YTC->getImmediatelyDeclaredConstraint());
6378}
6379
6380bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
6381 const NamedDecl *Y) const {
6382 if (X->getKind() != Y->getKind())
6383 return false;
6384
6385 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) {
6386 auto *TY = cast<TemplateTypeParmDecl>(Y);
6387 if (TX->isParameterPack() != TY->isParameterPack())
6388 return false;
6389 if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
6390 return false;
6391 return isSameTypeConstraint(TX->getTypeConstraint(),
6392 TY->getTypeConstraint());
6393 }
6394 
6395 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
6396 auto *TY = cast<NonTypeTemplateParmDecl>(Y);
6397 return TX->isParameterPack() == TY->isParameterPack() &&
6398 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) &&
6399 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(),
6400 TY->getPlaceholderTypeConstraint());
6401 }
6402 
6403 auto *TX = cast<TemplateTemplateParmDecl>(X);
6404 auto *TY = cast<TemplateTemplateParmDecl>(Y);
6405 return TX->isParameterPack() == TY->isParameterPack() &&
6406 isSameTemplateParameterList(TX->getTemplateParameters(),
6407 TY->getTemplateParameters());
6408}
6409
6410bool ASTContext::isSameTemplateParameterList(
6411 const TemplateParameterList *X, const TemplateParameterList *Y) const {
6412 if (X->size() != Y->size())
6413 return false;
6414
6415 for (unsigned I = 0, N = X->size(); I != N; ++I)
6416 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I)))
6417 return false;
6418 
6419 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause());
6420}
6421
6422bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
6423 const NamedDecl *Y) const {
6424 // If the type parameter isn't the same already, we don't need to check the
6425 // default argument further.
6426 if (!isSameTemplateParameter(X, Y))
6427 return false;
6428
6429 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) {
6430 auto *TTPY = cast<TemplateTypeParmDecl>(Y);
6431 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
6432 return false;
6433 
6434 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
6435 }
6436
6437 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
6438 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y);
6439 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument())
6440 return false;
6441
6442 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts();
6443 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts();
6444 llvm::FoldingSetNodeID XID, YID;
6445 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true);
6446 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true);
6447 return XID == YID;
6448 }
6449
6450 auto *TTPX = cast<TemplateTemplateParmDecl>(X);
6451 auto *TTPY = cast<TemplateTemplateParmDecl>(Y);
6452 
6453 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
6454 return false;
6455 
6456 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument();
6457 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument();
6458 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate());
6459}
6460
6461static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) {
6462 if (auto *NS = X->getAsNamespace())
6463 return NS;
6464 if (auto *NAS = X->getAsNamespaceAlias())
6465 return NAS->getNamespace();
6466 return nullptr;
6467}
6468
6469static bool isSameQualifier(const NestedNameSpecifier *X,
6470 const NestedNameSpecifier *Y) {
6471 if (auto *NSX = getNamespace(X)) {
6472 auto *NSY = getNamespace(Y);
6473 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl())
6474 return false;
6475 } else if (X->getKind() != Y->getKind())
6476 return false;
6477
6478 // FIXME: For namespaces and types, we're permitted to check that the entity
6479 // is named via the same tokens. We should probably do so.
6480 switch (X->getKind()) {
6481 case NestedNameSpecifier::Identifier:
6482 if (X->getAsIdentifier() != Y->getAsIdentifier())
6483 return false;
6484 break;
6485 case NestedNameSpecifier::Namespace:
6486 case NestedNameSpecifier::NamespaceAlias:
6487 // We've already checked that we named the same namespace.
6488 break;
6489 case NestedNameSpecifier::TypeSpec:
6490 case NestedNameSpecifier::TypeSpecWithTemplate:
6491 if (X->getAsType()->getCanonicalTypeInternal() !=
6492 Y->getAsType()->getCanonicalTypeInternal())
6493 return false;
6494 break;
6495 case NestedNameSpecifier::Global:
6496 case NestedNameSpecifier::Super:
6497 return true;
6498 }
6499
6500 // Recurse into earlier portion of NNS, if any.
6501 auto *PX = X->getPrefix();
6502 auto *PY = Y->getPrefix();
6503 if (PX && PY)
6504 return isSameQualifier(PX, PY);
6505 return !PX && !PY;
6506}
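// For example, with 'namespace N { struct S; } namespace M = N;', the
// qualifiers 'N::' and 'M::' compare equal here, because a namespace alias is
// resolved to the namespace it names before the comparison.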
6507
6508/// Determine whether the attributes we can overload on are identical for A and
6509/// B. Will ignore any overloadable attrs represented in the type of A and B.
6510static bool hasSameOverloadableAttrs(const FunctionDecl *A,
6511 const FunctionDecl *B) {
6512 // Note that pass_object_size attributes are represented in the function's
6513 // ExtParameterInfo, so we don't need to check them here.
6514
6515 llvm::FoldingSetNodeID Cand1ID, Cand2ID;
6516 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>();
6517 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>();
6518
6519 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) {
6520 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair);
6521 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair);
6522
6523 // Return false if the number of enable_if attributes is different.
6524 if (!Cand1A || !Cand2A)
6525 return false;
6526
6527 Cand1ID.clear();
6528 Cand2ID.clear();
6529
6530 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true);
6531 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true);
6532
6533 // Return false if any of the enable_if expressions of A and B are
6534 // different.
6535 if (Cand1ID != Cand2ID)
6536 return false;
6537 }
6538 return true;
6539}
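// Illustrative example (hypothetical function 'f'): two declarations of
//   void f(int n) __attribute__((enable_if(n > 0, "")));
// compare equal here, while a redeclaration whose enable_if condition is
// 'n >= 0' does not, since each condition is profiled canonically and the
// attributes are compared pairwise (a count mismatch also fails).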
6540
6541bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const {
6542 // Caution: this function is called by the AST reader during deserialization,
6543 // so it cannot rely on AST invariants being met. Non-trivial accessors
6544 // should be avoided, along with any traversal of redeclaration chains.
6545
6546 if (X == Y)
6547 return true;
6548
6549 if (X->getDeclName() != Y->getDeclName())
6550 return false;
6551
6552 // Must be in the same context.
6553 //
6554 // Note that we can't use DeclContext::Equals here, because the DeclContexts
6555 // could be two different declarations of the same function. (We will fix the
6556 // semantic DC to refer to the primary definition after merging.)
6557 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()),
6558 cast<Decl>(Y->getDeclContext()->getRedeclContext())))
6559 return false;
6560
6561 // Two typedefs refer to the same entity if they have the same underlying
6562 // type.
6563 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X))
6564 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y))
6565 return hasSameType(TypedefX->getUnderlyingType(),
6566 TypedefY->getUnderlyingType());
6567
6568 // Must have the same kind.
6569 if (X->getKind() != Y->getKind())
6570 return false;
6571
6572 // Objective-C classes and protocols with the same name always match.
6573 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
6574 return true;
6575
6576 if (isa<ClassTemplateSpecializationDecl>(X)) {
6577 // No need to handle these here: we merge them when adding them to the
6578 // template.
6579 return false;
6580 }
6581
6582 // Compatible tags match.
6583 if (const auto *TagX = dyn_cast<TagDecl>(X)) {
6584 const auto *TagY = cast<TagDecl>(Y);
6585 return (TagX->getTagKind() == TagY->getTagKind()) ||
6586 ((TagX->getTagKind() == TagTypeKind::Struct ||
6587 TagX->getTagKind() == TagTypeKind::Class ||
6588 TagX->getTagKind() == TagTypeKind::Interface) &&
6589 (TagY->getTagKind() == TagTypeKind::Struct ||
6590 TagY->getTagKind() == TagTypeKind::Class ||
6591 TagY->getTagKind() == TagTypeKind::Interface));
6592 }
6593
6594 // Functions with the same type and linkage match.
6595 // FIXME: This needs to cope with merging of prototyped/non-prototyped
6596 // functions, etc.
6597 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
6598 const auto *FuncY = cast<FunctionDecl>(Y);
6599 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
6600 const auto *CtorY = cast<CXXConstructorDecl>(Y);
6601 if (CtorX->getInheritedConstructor() &&
6602 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
6603 CtorY->getInheritedConstructor().getConstructor()))
6604 return false;
6605 }
6606
6607 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
6608 return false;
6609
6610 // Multiversioned functions with different feature strings are represented
6611 // as separate declarations.
6612 if (FuncX->isMultiVersion()) {
6613 const auto *TAX = FuncX->getAttr<TargetAttr>();
6614 const auto *TAY = FuncY->getAttr<TargetAttr>();
6615 assert(TAX && TAY && "Multiversion Function without target attribute");
6616
6617 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
6618 return false;
6619 }
6620
6621 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
6622 // not the same entity if they are constrained.
6623 if ((FuncX->isMemberLikeConstrainedFriend() ||
6624 FuncY->isMemberLikeConstrainedFriend()) &&
6625 !FuncX->getLexicalDeclContext()->Equals(
6626 FuncY->getLexicalDeclContext())) {
6627 return false;
6628 }
6629
6630 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
6631 FuncY->getTrailingRequiresClause()))
6632 return false;
6633
6634 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
6635 // Map to the first declaration that we've already merged into this one.
6636 // The TSI of redeclarations might not match (due to calling conventions
6637 // being inherited onto the type but not the TSI), but the TSI type of
6638 // the first declaration of the function should match across modules.
6639 FD = FD->getCanonicalDecl();
6640 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6641 : FD->getType();
6642 };
6643 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6644 if (!hasSameType(XT, YT)) {
6645 // We can get functions with different types on the redecl chain in C++17
6646 // if they have differing exception specifications and at least one of
6647 // the exception specs is unresolved.
6648 auto *XFPT = XT->getAs<FunctionProtoType>();
6649 auto *YFPT = YT->getAs<FunctionProtoType>();
6650 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6651 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6652 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6653 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6654 return true;
6655 return false;
6656 }
6657 
6658 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6659 hasSameOverloadableAttrs(FuncX, FuncY);
6660 }
6661
6662 // Variables with the same type and linkage match.
6663 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6664 const auto *VarY = cast<VarDecl>(Y);
6665 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6666 // During deserialization, we might compare variables before we load
6667 // their types. Assume the types will end up being the same.
6668 if (VarX->getType().isNull() || VarY->getType().isNull())
6669 return true;
6670
6671 if (hasSameType(VarX->getType(), VarY->getType()))
6672 return true;
6673
6674 // We can get decls with different types on the redecl chain. Eg.
6675 // template <typename T> struct S { static T Var[]; }; // #1
6676 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6677 // This seems to happen only when completing an incomplete array type; in
6678 // that case, when comparing #1 and #2, we compare their element types.
6679 const ArrayType *VarXTy = getAsArrayType(VarX->getType());
6680 const ArrayType *VarYTy = getAsArrayType(VarY->getType());
6681 if (!VarXTy || !VarYTy)
6682 return false;
6683 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType())
6684 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType());
6685 }
6686 return false;
6687 }
6688
6689 // Namespaces with the same name and inlinedness match.
6690 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
6691 const auto *NamespaceY = cast<NamespaceDecl>(Y);
6692 return NamespaceX->isInline() == NamespaceY->isInline();
6693 }
6694
6695 // Identical template names and kinds match if their template parameter lists
6696 // and patterns match.
6697 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) {
6698 const auto *TemplateY = cast<TemplateDecl>(Y);
6699 
6700 // Two ConceptDecls are not the same if their constraint expressions differ.
6701 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) {
6702 const auto *ConceptY = cast<ConceptDecl>(Y);
6703 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(),
6704 ConceptY->getConstraintExpr()))
6705 return false;
6706 }
6707 
6708 return isSameEntity(TemplateX->getTemplatedDecl(),
6709 TemplateY->getTemplatedDecl()) &&
6710 isSameTemplateParameterList(TemplateX->getTemplateParameters(),
6711 TemplateY->getTemplateParameters());
6712 }
6713
6714 // Fields with the same name and the same type match.
6715 if (const auto *FDX = dyn_cast<FieldDecl>(X)) {
6716 const auto *FDY = cast<FieldDecl>(Y);
6717 // FIXME: Also check the bitwidth is odr-equivalent, if any.
6718 return hasSameType(FDX->getType(), FDY->getType());
6719 }
6720
6721 // Indirect fields with the same target field match.
6722 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) {
6723 const auto *IFDY = cast<IndirectFieldDecl>(Y);
6724 return IFDX->getAnonField()->getCanonicalDecl() ==
6725 IFDY->getAnonField()->getCanonicalDecl();
6726 }
6727
6728 // Enumerators with the same name match.
6729 if (isa<EnumConstantDecl>(X))
6730 // FIXME: Also check the value is odr-equivalent.
6731 return true;
6732
6733 // Using shadow declarations with the same target match.
6734 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) {
6735 const auto *USY = cast<UsingShadowDecl>(Y);
6736 return USX->getTargetDecl() == USY->getTargetDecl();
6737 }
6738
6739 // Using declarations with the same qualifier match. (We already know that
6740 // the name matches.)
6741 if (const auto *UX = dyn_cast<UsingDecl>(X)) {
6742 const auto *UY = cast<UsingDecl>(Y);
6743 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
6744 UX->hasTypename() == UY->hasTypename() &&
6745 UX->isAccessDeclaration() == UY->isAccessDeclaration();
6746 }
6747 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) {
6748 const auto *UY = cast<UnresolvedUsingValueDecl>(Y);
6749 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
6750 UX->isAccessDeclaration() == UY->isAccessDeclaration();
6751 }
6752 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
6753 return isSameQualifier(
6754 UX->getQualifier(),
6755 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
6756 }
6757
6758 // Using-pack declarations are only created by instantiation, and match if
6759 // they're instantiated from matching UnresolvedUsing...Decls.
6760 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
6761 return declaresSameEntity(
6762 UX->getInstantiatedFromUsingDecl(),
6763 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
6764 }
6765
6766 // Namespace alias definitions with the same target match.
6767 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
6768 const auto *NAY = cast<NamespaceAliasDecl>(Y);
6769 return NAX->getNamespace()->Equals(NAY->getNamespace());
6770 }
6771
6772 return false;
6773}
6774
6775TemplateArgument
6776ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
6777 switch (Arg.getKind()) {
6778 case TemplateArgument::Null:
6779 return Arg;
6780
6781 case TemplateArgument::Expression:
6782 return Arg;
6783
6784 case TemplateArgument::Declaration: {
6785 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl());
6786 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()),
6787 Arg.getIsDefaulted());
6788 }
6789 
6790 case TemplateArgument::NullPtr:
6791 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()),
6792 /*isNullPtr*/ true, Arg.getIsDefaulted());
6793 
6794 case TemplateArgument::Template:
6795 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()),
6796 Arg.getIsDefaulted());
6797 
6798 case TemplateArgument::TemplateExpansion:
6799 return TemplateArgument(
6800 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()),
6801 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted());
6802 
6803 case TemplateArgument::Integral:
6804 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType()));
6805 
6806 case TemplateArgument::StructuralValue:
6807 return TemplateArgument(*this,
6808 getCanonicalType(Arg.getStructuralValueType()),
6809 Arg.getAsStructuralValue());
6810 
6811 case TemplateArgument::Type:
6812 return TemplateArgument(getCanonicalType(Arg.getAsType()),
6813 /*isNullPtr*/ false, Arg.getIsDefaulted());
6814 
6815 case TemplateArgument::Pack: {
6816 bool AnyNonCanonArgs = false;
6817 auto CanonArgs = ::getCanonicalTemplateArguments(
6818 *this, Arg.pack_elements(), AnyNonCanonArgs);
6819 if (!AnyNonCanonArgs)
6820 return Arg;
6821 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this),
6822 CanonArgs);
6823 }
6824 }
6825
6826 // Silence GCC warning
6827 llvm_unreachable("Unhandled template argument kind");
6828}
6829
6830NestedNameSpecifier *
6831ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
6832 if (!NNS)
6833 return nullptr;
6834
6835 switch (NNS->getKind()) {
6836 case NestedNameSpecifier::Identifier:
6837 // Canonicalize the prefix but keep the identifier the same.
6838 return NestedNameSpecifier::Create(*this,
6839 getCanonicalNestedNameSpecifier(NNS->getPrefix()),
6840 NNS->getAsIdentifier());
6841
6842 case NestedNameSpecifier::Namespace:
6843 // A namespace is canonical; build a nested-name-specifier with
6844 // this namespace and no prefix.
6845 return NestedNameSpecifier::Create(*this, nullptr,
6846 NNS->getAsNamespace()->getOriginalNamespace());
6847
6848 case NestedNameSpecifier::NamespaceAlias:
6849 // A namespace is canonical; build a nested-name-specifier with
6850 // this namespace and no prefix.
6851 return NestedNameSpecifier::Create(*this, nullptr,
6852 NNS->getAsNamespaceAlias()->getNamespace()
6853 ->getOriginalNamespace());
6854
6855 // The difference between TypeSpec and TypeSpecWithTemplate is that the
6856 // latter will have the 'template' keyword when printed.
6857 case NestedNameSpecifier::TypeSpec:
6858 case NestedNameSpecifier::TypeSpecWithTemplate: {
6859 const Type *T = getCanonicalType(NNS->getAsType());
6860
6861 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6862 // break it apart into its prefix and identifier, then reconstitute those
6863 // as the canonical nested-name-specifier. This is required to canonicalize
6864 // a dependent nested-name-specifier involving typedefs of dependent-name
6865 // types, e.g.,
6866 // typedef typename T::type T1;
6867 // typedef typename T1::type T2;
6868 if (const auto *DNT = T->getAs<DependentNameType>())
6869 return NestedNameSpecifier::Create(
6870 *this, DNT->getQualifier(),
6871 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6872 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6873 return NestedNameSpecifier::Create(*this, DTST->getQualifier(),
6874 /*Template=*/true, const_cast<Type *>(T));
6875 
6876 // TODO: Set 'Template' parameter to true for other template types.
6877 return NestedNameSpecifier::Create(*this, nullptr, /*Template=*/false,
6878 const_cast<Type *>(T));
6879 }
6880
6881 case NestedNameSpecifier::Global:
6882 case NestedNameSpecifier::Super:
6883 // The global specifier and __super specifier are canonical and unique.
6884 return NNS;
6885 }
6886
6887 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6888}
6889
6890const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6891 // Handle the non-qualified case efficiently.
6892 if (!T.hasLocalQualifiers()) {
6893 // Handle the common positive case fast.
6894 if (const auto *AT = dyn_cast<ArrayType>(T))
6895 return AT;
6896 }
6897
6898 // Handle the common negative case fast.
6899 if (!isa<ArrayType>(T.getCanonicalType()))
6900 return nullptr;
6901
6902 // Apply any qualifiers from the array type to the element type. This
6903 // implements C99 6.7.3p8: "If the specification of an array type includes
6904 // any type qualifiers, the element type is so qualified, not the array type."
6905
6906 // If we get here, we either have type qualifiers on the type, or we have
6907 // sugar such as a typedef in the way. If we have type qualifiers on the type
6908 // we must propagate them down into the element type.
6909
6910 SplitQualType split = T.getSplitDesugaredType();
6911 Qualifiers qs = split.Quals;
6912
6913 // If we have a simple case, just return now.
6914 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6915 if (!ATy || qs.empty())
6916 return ATy;
6917
6918 // Otherwise, we have an array and we have qualifiers on it. Push the
6919 // qualifiers into the array element type and return a new array type.
6920 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
6921 
6922 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy))
6923 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
6924 CAT->getSizeExpr(),
6925 CAT->getSizeModifier(),
6926 CAT->getIndexTypeCVRQualifiers()));
6927 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy))
6928 return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
6929 IAT->getSizeModifier(),
6930 IAT->getIndexTypeCVRQualifiers()));
6931 
6932 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy))
6933 return cast<ArrayType>(
6934 getDependentSizedArrayType(NewEltTy,
6935 DSAT->getSizeExpr(),
6936 DSAT->getSizeModifier(),
6937 DSAT->getIndexTypeCVRQualifiers(),
6938 DSAT->getBracketsRange()));
6939 
6940 const auto *VAT = cast<VariableArrayType>(ATy);
6941 return cast<ArrayType>(getVariableArrayType(NewEltTy,
6942 VAT->getSizeExpr(),
6943 VAT->getSizeModifier(),
6944 VAT->getIndexTypeCVRQualifiers(),
6945 VAT->getBracketsRange()));
6946}
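// For example, given 'typedef int A[5]; const A x;', calling getAsArrayType()
// on the type of 'x' yields 'const int[5]': the typedef sugar is looked
// through and the outer 'const' is pushed onto the element type, per the C99
// 6.7.3p8 rule quoted above.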
6947
6948QualType ASTContext::getAdjustedParameterType(QualType T) const {
6949 if (T->isArrayType() || T->isFunctionType())
6950 return getDecayedType(T);
6951 return T;
6952}
6953
6954QualType ASTContext::getSignatureParameterType(QualType T) const {
6955 T = getVariableArrayDecayedType(T);
6956 T = getAdjustedParameterType(T);
6957 return T.getUnqualifiedType();
6958}
6959
6960QualType ASTContext::getExceptionObjectType(QualType T) const {
6961 // C++ [except.throw]p3:
6962 // A throw-expression initializes a temporary object, called the exception
6963 // object, the type of which is determined by removing any top-level
6964 // cv-qualifiers from the static type of the operand of throw and adjusting
6965 // the type from "array of T" or "function returning T" to "pointer to T"
6966 // or "pointer to function returning T", [...]
6967 T = getVariableArrayDecayedType(T);
6968 if (T->isArrayType() || T->isFunctionType())
6969 T = getDecayedType(T);
6970 return T.getUnqualifiedType();
6971}
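// For example, 'throw "oops"' has an operand of type 'const char[5]', so the
// exception object type computed here is 'const char *'; for a non-array,
// non-function operand only the top-level cv-qualifiers are dropped.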
6972
6973/// getArrayDecayedType - Return the properly qualified result of decaying the
6974/// specified array type to a pointer. This operation is non-trivial when
6975/// handling typedefs etc. The canonical type of "T" must be an array type,
6976/// this returns a pointer to a properly qualified element of the array.
6977///
6978/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
6979QualType ASTContext::getArrayDecayedType(QualType Ty) const {
6980 // Get the element type with 'getAsArrayType' so that we don't lose any
6981 // typedefs in the element type of the array. This also handles propagation
6982 // of type qualifiers from the array type into the element type if present
6983 // (C99 6.7.3p8).
6984 const ArrayType *PrettyArrayType = getAsArrayType(Ty);
6985 assert(PrettyArrayType && "Not an array type!");
6986
6987 QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
6988 
6989 // int x[restrict 4] -> int *restrict
6990 QualType Result = getQualifiedType(PtrTy,
6991 PrettyArrayType->getIndexTypeQualifiers());
6992 
6993 // int x[_Nullable] -> int * _Nullable
6994 if (auto Nullability = Ty->getNullability()) {
6995 Result = const_cast<ASTContext *>(this)->getAttributedType(
6996 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
6997 }
6998 return Result;
6999}
7000
7001QualType ASTContext::getBaseElementType(const ArrayType *array) const {
7002 return getBaseElementType(array->getElementType());
7003}
7004
7005QualType ASTContext::getBaseElementType(QualType type) const {
7006 Qualifiers qs;
7007 while (true) {
7008 SplitQualType split = type.getSplitDesugaredType();
7009 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
7010 if (!array) break;
7011
7012 type = array->getElementType();
7013 qs.addConsistentQualifiers(split.Quals);
7014 }
7015 
7016 return getQualifiedType(type, qs);
7017}
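// For example, the base element type of 'const int[2][3]' is 'const int';
// qualifiers encountered at any array level are merged into the result.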
7018
7019/// getConstantArrayElementCount - Returns number of constant array elements.
7020uint64_t
7021ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
7022 uint64_t ElementCount = 1;
7023 do {
7024 ElementCount *= CA->getSize().getZExtValue();
7025 CA = dyn_cast_or_null<ConstantArrayType>(
7026 CA->getElementType()->getAsArrayTypeUnsafe());
7027 } while (CA);
7028 return ElementCount;
7029}
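// For example, for 'int a[2][3]' this returns 6 (2 * 3), multiplying the
// bounds of all nested constant array levels.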
7030
7031uint64_t ASTContext::getArrayInitLoopExprElementCount(
7032 const ArrayInitLoopExpr *AILE) const {
7033 if (!AILE)
7034 return 0;
7035
7036 uint64_t ElementCount = 1;
7037
7038 do {
7039 ElementCount *= AILE->getArraySize().getZExtValue();
7040 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr());
7041 } while (AILE);
7042
7043 return ElementCount;
7044}
7045
7046/// getFloatingRank - Return a relative rank for floating point types.
7047/// This routine will assert if passed a built-in type that isn't a float.
7048static FloatingRank getFloatingRank(QualType T) {
7049 if (const auto *CT = T->getAs<ComplexType>())
7050 return getFloatingRank(CT->getElementType());
7051
7052 switch (T->castAs<BuiltinType>()->getKind()) {
7053 default: llvm_unreachable("getFloatingRank(): not a floating type");
7054 case BuiltinType::Float16: return Float16Rank;
7055 case BuiltinType::Half: return HalfRank;
7056 case BuiltinType::Float: return FloatRank;
7057 case BuiltinType::Double: return DoubleRank;
7058 case BuiltinType::LongDouble: return LongDoubleRank;
7059 case BuiltinType::Float128: return Float128Rank;
7060 case BuiltinType::BFloat16: return BFloat16Rank;
7061 case BuiltinType::Ibm128: return Ibm128Rank;
7062 }
7063}
7064
7065/// getFloatingTypeOrder - Compare the rank of the two specified floating
7066/// point types, ignoring the domain of the type (i.e. 'double' ==
7067/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
7068/// LHS < RHS, return -1.
7069int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
7070 FloatingRank LHSR = getFloatingRank(LHS);
7071 FloatingRank RHSR = getFloatingRank(RHS);
7072
7073 if (LHSR == RHSR)
7074 return 0;
7075 if (LHSR > RHSR)
7076 return 1;
7077 return -1;
7078}
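// For example, comparing 'double' against 'float' yields 1, while comparing
// 'float' against '_Complex float' yields 0, since the complex domain is
// ignored and only the floating-point rank is compared.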
7079
7080int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const {
7081 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS))
7082 return 0;
7083 return getFloatingTypeOrder(LHS, RHS);
7084}
7085
7086/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
7087/// routine will assert if passed a built-in type that isn't an integer or enum,
7088/// or if it is not canonicalized.
7089unsigned ASTContext::getIntegerRank(const Type *T) const {
7090 assert(T->isCanonicalUnqualified() && "T should be canonicalized");
7091
7092 // Results in this 'losing' to any type of the same size, but winning if
7093 // larger.
7094 if (const auto *EIT = dyn_cast<BitIntType>(T))
7095 return 0 + (EIT->getNumBits() << 3);
7096
7097 switch (cast<BuiltinType>(T)->getKind()) {
7098 default: llvm_unreachable("getIntegerRank(): not a built-in integer");
7099 case BuiltinType::Bool:
7100 return 1 + (getIntWidth(BoolTy) << 3);
7101 case BuiltinType::Char_S:
7102 case BuiltinType::Char_U:
7103 case BuiltinType::SChar:
7104 case BuiltinType::UChar:
7105 return 2 + (getIntWidth(CharTy) << 3);
7106 case BuiltinType::Short:
7107 case BuiltinType::UShort:
7108 return 3 + (getIntWidth(ShortTy) << 3);
7109 case BuiltinType::Int:
7110 case BuiltinType::UInt:
7111 return 4 + (getIntWidth(IntTy) << 3);
7112 case BuiltinType::Long:
7113 case BuiltinType::ULong:
7114 return 5 + (getIntWidth(LongTy) << 3);
7115 case BuiltinType::LongLong:
7116 case BuiltinType::ULongLong:
7117 return 6 + (getIntWidth(LongLongTy) << 3);
7118 case BuiltinType::Int128:
7119 case BuiltinType::UInt128:
7120 return 7 + (getIntWidth(Int128Ty) << 3);
7121
7122 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of
7123 // their underlying types" [c++20 conv.rank]
7124 case BuiltinType::Char8:
7125 return getIntegerRank(UnsignedCharTy.getTypePtr());
7126 case BuiltinType::Char16:
7127 return getIntegerRank(
7128 getFromTargetType(Target->getChar16Type()).getTypePtr());
7129 case BuiltinType::Char32:
7130 return getIntegerRank(
7131 getFromTargetType(Target->getChar32Type()).getTypePtr());
7132 case BuiltinType::WChar_S:
7133 case BuiltinType::WChar_U:
7134 return getIntegerRank(
7135 getFromTargetType(Target->getWCharType()).getTypePtr());
7136 }
7137}
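// The resulting ordering is primarily by bit-width and secondarily by the
// offsets above, so that on common targets 'bool' < 'char' < 'short' < 'int',
// and '_BitInt(32)' ranks just below a 32-bit 'int' as the comment at the top
// of this function describes.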
7138
7139/// Whether this is a promotable bitfield reference according
7140/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
7141///
7142/// \returns the type this bit-field will promote to, or NULL if no
7143/// promotion occurs.
7144QualType ASTContext::isPromotableBitField(Expr *E) const {
7145 if (E->isTypeDependent() || E->isValueDependent())
7146 return {};
7147
7148 // C++ [conv.prom]p5:
7149 // If the bit-field has an enumerated type, it is treated as any other
7150 // value of that type for promotion purposes.
7151 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType())
7152 return {};
7153
7154 // FIXME: We should not do this unless E->refersToBitField() is true. This
7155 // matters in C where getSourceBitField() will find bit-fields for various
7156 // cases where the source expression is not a bit-field designator.
7157
7158 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields?
7159 if (!Field)
7160 return {};
7161
7162 QualType FT = Field->getType();
7163
7164 uint64_t BitWidth = Field->getBitWidthValue(*this);
7165 uint64_t IntSize = getTypeSize(IntTy);
7166 // C++ [conv.prom]p5:
7167 // A prvalue for an integral bit-field can be converted to a prvalue of type
7168 // int if int can represent all the values of the bit-field; otherwise, it
7169 // can be converted to unsigned int if unsigned int can represent all the
7170 // values of the bit-field. If the bit-field is larger yet, no integral
7171 // promotion applies to it.
7172 // C11 6.3.1.1/2:
7173 // [For a bit-field of type _Bool, int, signed int, or unsigned int:]
7174 // If an int can represent all values of the original type (as restricted by
7175 // the width, for a bit-field), the value is converted to an int; otherwise,
7176 // it is converted to an unsigned int.
7177 //
7178 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int.
7179 // We perform that promotion here to match GCC and C++.
7180 // FIXME: C does not permit promotion of an enum bit-field whose rank is
7181 // greater than that of 'int'. We perform that promotion to match GCC.
7182 if (BitWidth < IntSize)
7183 return IntTy;
7184
7185 if (BitWidth == IntSize)
7186 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
7187
7188 // Bit-fields wider than int are not subject to promotions, and therefore act
7189 // like the base type. GCC has some weird bugs in this area that we
7190 // deliberately do not follow (GCC follows a pre-standard resolution to
7191 // C's DR315 which treats bit-width as being part of the type, and this leaks
7192 // into their semantics in some cases).
7193 return {};
7194}
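// For example, given 'struct S { unsigned long x : 3; } s;', the expression
// 's.x' is promoted to 'int' here (a 3-bit field fits in int), whereas on a
// typical target where int is 32 bits a 64-bit-wide bit-field of type
// 'unsigned long' yields a null type and keeps behaving like its declared
// type.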
7195
7196/// getPromotedIntegerType - Returns the type that Promotable will
7197/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
7198/// integer type.
7199QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
7200 assert(!Promotable.isNull());
7201 assert(isPromotableIntegerType(Promotable));
7202 if (const auto *ET = Promotable->getAs<EnumType>())
7203 return ET->getDecl()->getPromotionType();
7204
7205 if (const auto *BT = Promotable->getAs<BuiltinType>()) {
7206 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
7207 // (3.9.1) can be converted to a prvalue of the first of the following
7208 // types that can represent all the values of its underlying type:
7209 // int, unsigned int, long int, unsigned long int, long long int, or
7210 // unsigned long long int [...]
7211 // FIXME: Is there some better way to compute this?
7212 if (BT->getKind() == BuiltinType::WChar_S ||
7213 BT->getKind() == BuiltinType::WChar_U ||
7214 BT->getKind() == BuiltinType::Char8 ||
7215 BT->getKind() == BuiltinType::Char16 ||
7216 BT->getKind() == BuiltinType::Char32) {
7217 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
7218 uint64_t FromSize = getTypeSize(BT);
7219 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
7220 LongLongTy, UnsignedLongLongTy };
7221 for (const auto &PT : PromoteTypes) {
7222 uint64_t ToSize = getTypeSize(PT);
7223 if (FromSize < ToSize ||
7224 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType()))
7225 return PT;
7226 }
7227 llvm_unreachable("char type should fit into long long");
7228 }
7229 }
7230
7231 // At this point, we should have a signed or unsigned integer type.
7232 if (Promotable->isSignedIntegerType())
7233 return IntTy;
7234 uint64_t PromotableSize = getIntWidth(Promotable);
7235 uint64_t IntSize = getIntWidth(IntTy);
7236 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
7237 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
7238}
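// For example, 'unsigned short' promotes to 'int' on targets where int is
// wider than short, and 'char16_t' promotes to the first of int, unsigned
// int, long, ... that can represent all of its values (typically 'int').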
7239
7240/// Recurses in pointer/array types until it finds an objc retainable
7241/// type and returns its ownership.
7242Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
7243 while (!T.isNull()) {
7244 if (T.getObjCLifetime() != Qualifiers::OCL_None)
7245 return T.getObjCLifetime();
7246 if (T->isArrayType())
7247 T = getBaseElementType(T);
7248 else if (const auto *PT = T->getAs<PointerType>())
7249 T = PT->getPointeeType();
7250 else if (const auto *RT = T->getAs<ReferenceType>())
7251 T = RT->getPointeeType();
7252 else
7253 break;
7254 }
7255
7256 return Qualifiers::OCL_None;
7257}
7258
7259static const Type *getIntegerTypeForEnum(const EnumType *ET) {
7260 // Incomplete enum types are not treated as integer types.
7261 // FIXME: In C++, enum types are never integer types.
7262 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
7263 return ET->getDecl()->getIntegerType().getTypePtr();
7264 return nullptr;
7265}
7266
7267/// getIntegerTypeOrder - Returns the highest ranked integer type:
7268/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
7269/// LHS < RHS, return -1.
7270int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
7271 const Type *LHSC = getCanonicalType(LHS).getTypePtr();
7272 const Type *RHSC = getCanonicalType(RHS).getTypePtr();
7273
7274 // Unwrap enums to their underlying type.
7275 if (const auto *ET = dyn_cast<EnumType>(LHSC))
7276 LHSC = getIntegerTypeForEnum(ET);
7277 if (const auto *ET = dyn_cast<EnumType>(RHSC))
7278 RHSC = getIntegerTypeForEnum(ET);
7279
7280 if (LHSC == RHSC) return 0;
7281
7282 bool LHSUnsigned = LHSC->isUnsignedIntegerType();
7283 bool RHSUnsigned = RHSC->isUnsignedIntegerType();
7284
7285 unsigned LHSRank = getIntegerRank(LHSC);
7286 unsigned RHSRank = getIntegerRank(RHSC);
7287
7288 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
7289 if (LHSRank == RHSRank) return 0;
7290 return LHSRank > RHSRank ? 1 : -1;
7291 }
7292
7293 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
7294 if (LHSUnsigned) {
7295 // If the unsigned [LHS] type is larger, return it.
7296 if (LHSRank >= RHSRank)
7297 return 1;
7298
7299 // If the signed type can represent all values of the unsigned type, it
7300 // wins. Because we are dealing with 2's complement and types that are
7301 // powers of two larger than each other, this is always safe.
7302 return -1;
7303 }
7304
7305 // If the unsigned [RHS] type is larger, return it.
7306 if (RHSRank >= LHSRank)
7307 return -1;
7308
7309 // If the signed type can represent all values of the unsigned type, it
7310 // wins. Because we are dealing with 2's complement and types that are
7311 // powers of two larger than each other, this is always safe.
7312 return 1;
7313}
7314
7315TypedefDecl *ASTContext::getCFConstantStringDecl() const {
7316 if (CFConstantStringTypeDecl)
7317 return CFConstantStringTypeDecl;
7318
7319 assert(!CFConstantStringTagDecl &&
7320 "tag and typedef should be initialized together");
7321 CFConstantStringTagDecl = buildImplicitRecord(Name: "__NSConstantString_tag");
7322 CFConstantStringTagDecl->startDefinition();
7323
7324 struct {
7325 QualType Type;
7326 const char *Name;
7327 } Fields[5];
7328 unsigned Count = 0;
7329
7330 /// Objective-C ABI
7331 ///
7332 /// typedef struct __NSConstantString_tag {
7333 /// const int *isa;
7334 /// int flags;
7335 /// const char *str;
7336 /// long length;
7337 /// } __NSConstantString;
7338 ///
7339 /// Swift ABI (4.1, 4.2)
7340 ///
7341 /// typedef struct __NSConstantString_tag {
7342 /// uintptr_t _cfisa;
7343 /// uintptr_t _swift_rc;
7344 /// _Atomic(uint64_t) _cfinfoa;
7345 /// const char *_ptr;
7346 /// uint32_t _length;
7347 /// } __NSConstantString;
7348 ///
7349 /// Swift ABI (5.0)
7350 ///
7351 /// typedef struct __NSConstantString_tag {
7352 /// uintptr_t _cfisa;
7353 /// uintptr_t _swift_rc;
7354 /// _Atomic(uint64_t) _cfinfoa;
7355 /// const char *_ptr;
7356 /// uintptr_t _length;
7357 /// } __NSConstantString;
7358
7359 const auto CFRuntime = getLangOpts().CFRuntime;
7360 if (static_cast<unsigned>(CFRuntime) <
7361 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
7362 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
7363 Fields[Count++] = { IntTy, "flags" };
7364 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
7365 Fields[Count++] = { LongTy, "length" };
7366 } else {
7367 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
7368 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
    Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" };
7370 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
7371 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7372 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      Fields[Count++] = { IntTy, "_length" };
    else
      Fields[Count++] = { getUIntPtrType(), "_length" };
7376 }
7377
7378 // Create fields
7379 for (unsigned i = 0; i < Count; ++i) {
7380 FieldDecl *Field =
7381 FieldDecl::Create(C: *this, DC: CFConstantStringTagDecl, StartLoc: SourceLocation(),
7382 IdLoc: SourceLocation(), Id: &Idents.get(Name: Fields[i].Name),
7383 T: Fields[i].Type, /*TInfo=*/nullptr,
7384 /*BitWidth=*/BW: nullptr, /*Mutable=*/false, InitStyle: ICIS_NoInit);
7385 Field->setAccess(AS_public);
7386 CFConstantStringTagDecl->addDecl(Field);
7387 }
7388
7389 CFConstantStringTagDecl->completeDefinition();
7390 // This type is designed to be compatible with NSConstantString, but cannot
7391 // use the same name, since NSConstantString is an interface.
7392 auto tagType = getTagDeclType(CFConstantStringTagDecl);
7393 CFConstantStringTypeDecl =
7394 buildImplicitTypedef(T: tagType, Name: "__NSConstantString");
7395
7396 return CFConstantStringTypeDecl;
7397}
7398
7399RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
7400 if (!CFConstantStringTagDecl)
7401 getCFConstantStringDecl(); // Build the tag and the typedef.
7402 return CFConstantStringTagDecl;
7403}
7404
7405// getCFConstantStringType - Return the type used for constant CFStrings.
7406QualType ASTContext::getCFConstantStringType() const {
7407 return getTypedefType(getCFConstantStringDecl());
7408}
7409
7410QualType ASTContext::getObjCSuperType() const {
7411 if (ObjCSuperType.isNull()) {
7412 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord(Name: "objc_super");
7413 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
7414 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
7415 }
7416 return ObjCSuperType;
7417}
7418
7419void ASTContext::setCFConstantStringType(QualType T) {
7420 const auto *TD = T->castAs<TypedefType>();
7421 CFConstantStringTypeDecl = cast<TypedefDecl>(Val: TD->getDecl());
7422 const auto *TagType =
7423 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>();
7424 CFConstantStringTagDecl = TagType->getDecl();
7425}
7426
7427QualType ASTContext::getBlockDescriptorType() const {
7428 if (BlockDescriptorType)
7429 return getTagDeclType(BlockDescriptorType);
7430
7431 RecordDecl *RD;
7432 // FIXME: Needs the FlagAppleBlock bit.
7433 RD = buildImplicitRecord(Name: "__block_descriptor");
7434 RD->startDefinition();
7435
7436 QualType FieldTypes[] = {
7437 UnsignedLongTy,
7438 UnsignedLongTy,
7439 };
7440
7441 static const char *const FieldNames[] = {
7442 "reserved",
7443 "Size"
7444 };
7445
7446 for (size_t i = 0; i < 2; ++i) {
7447 FieldDecl *Field = FieldDecl::Create(
7448 *this, RD, SourceLocation(), SourceLocation(),
7449 &Idents.get(Name: FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
7450 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
7451 Field->setAccess(AS_public);
7452 RD->addDecl(Field);
7453 }
7454
7455 RD->completeDefinition();
7456
7457 BlockDescriptorType = RD;
7458
7459 return getTagDeclType(BlockDescriptorType);
7460}
7461
7462QualType ASTContext::getBlockDescriptorExtendedType() const {
7463 if (BlockDescriptorExtendedType)
7464 return getTagDeclType(BlockDescriptorExtendedType);
7465
7466 RecordDecl *RD;
7467 // FIXME: Needs the FlagAppleBlock bit.
7468 RD = buildImplicitRecord(Name: "__block_descriptor_withcopydispose");
7469 RD->startDefinition();
7470
7471 QualType FieldTypes[] = {
7472 UnsignedLongTy,
7473 UnsignedLongTy,
7474 getPointerType(VoidPtrTy),
7475 getPointerType(VoidPtrTy)
7476 };
7477
7478 static const char *const FieldNames[] = {
7479 "reserved",
7480 "Size",
7481 "CopyFuncPtr",
7482 "DestroyFuncPtr"
7483 };
7484
7485 for (size_t i = 0; i < 4; ++i) {
7486 FieldDecl *Field = FieldDecl::Create(
7487 *this, RD, SourceLocation(), SourceLocation(),
7488 &Idents.get(Name: FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr,
7489 /*BitWidth=*/nullptr,
7490 /*Mutable=*/false, ICIS_NoInit);
7491 Field->setAccess(AS_public);
7492 RD->addDecl(Field);
7493 }
7494
7495 RD->completeDefinition();
7496
7497 BlockDescriptorExtendedType = RD;
7498 return getTagDeclType(BlockDescriptorExtendedType);
7499}
7500
7501OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const {
7502 const auto *BT = dyn_cast<BuiltinType>(Val: T);
7503
7504 if (!BT) {
7505 if (isa<PipeType>(Val: T))
7506 return OCLTK_Pipe;
7507
7508 return OCLTK_Default;
7509 }
7510
7511 switch (BT->getKind()) {
7512#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
7513 case BuiltinType::Id: \
7514 return OCLTK_Image;
7515#include "clang/Basic/OpenCLImageTypes.def"
7516
7517 case BuiltinType::OCLClkEvent:
7518 return OCLTK_ClkEvent;
7519
7520 case BuiltinType::OCLEvent:
7521 return OCLTK_Event;
7522
7523 case BuiltinType::OCLQueue:
7524 return OCLTK_Queue;
7525
7526 case BuiltinType::OCLReserveID:
7527 return OCLTK_ReserveID;
7528
7529 case BuiltinType::OCLSampler:
7530 return OCLTK_Sampler;
7531
7532 default:
7533 return OCLTK_Default;
7534 }
7535}
7536
7537LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const {
7538 return Target->getOpenCLTypeAddrSpace(TK: getOpenCLTypeKind(T));
7539}
7540
7541/// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty"
7542/// requires copy/dispose. Note that this must match the logic
7543/// in buildByrefHelpers.
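/// For example, a __block variable of Objective-C object pointer type
/// ("__block id obj;") needs copy/dispose helpers, while "__block int i;"
/// does not.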
7544bool ASTContext::BlockRequiresCopying(QualType Ty,
7545 const VarDecl *D) {
7546 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) {
7547 const Expr *copyExpr = getBlockVarCopyInit(VD: D).getCopyExpr();
7548 if (!copyExpr && record->hasTrivialDestructor()) return false;
7549
7550 return true;
7551 }
7552
7553 // The block needs copy/destroy helpers if Ty is non-trivial to destructively
7554 // move or destroy.
7555 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType())
7556 return true;
7557
7558 if (!Ty->isObjCRetainableType()) return false;
7559
7560 Qualifiers qs = Ty.getQualifiers();
7561
7562 // If we have lifetime, that dominates.
7563 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
7564 switch (lifetime) {
7565 case Qualifiers::OCL_None: llvm_unreachable("impossible");
7566
7567 // These are just bits as far as the runtime is concerned.
7568 case Qualifiers::OCL_ExplicitNone:
7569 case Qualifiers::OCL_Autoreleasing:
7570 return false;
7571
7572 // These cases should have been taken care of when checking the type's
7573 // non-triviality.
7574 case Qualifiers::OCL_Weak:
7575 case Qualifiers::OCL_Strong:
7576 llvm_unreachable("impossible");
7577 }
7578 llvm_unreachable("fell out of lifetime switch!");
7579 }
7580 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) ||
7581 Ty->isObjCObjectPointerType());
7582}
7583
7584bool ASTContext::getByrefLifetime(QualType Ty,
7585 Qualifiers::ObjCLifetime &LifeTime,
7586 bool &HasByrefExtendedLayout) const {
7587 if (!getLangOpts().ObjC ||
7588 getLangOpts().getGC() != LangOptions::NonGC)
7589 return false;
7590
7591 HasByrefExtendedLayout = false;
7592 if (Ty->isRecordType()) {
7593 HasByrefExtendedLayout = true;
7594 LifeTime = Qualifiers::OCL_None;
7595 } else if ((LifeTime = Ty.getObjCLifetime())) {
7596 // Honor the ARC qualifiers.
7597 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) {
7598 // The MRR rule.
7599 LifeTime = Qualifiers::OCL_ExplicitNone;
7600 } else {
7601 LifeTime = Qualifiers::OCL_None;
7602 }
7603 return true;
7604}
7605
7606CanQualType ASTContext::getNSUIntegerType() const {
7607 assert(Target && "Expected target to be initialized");
7608 const llvm::Triple &T = Target->getTriple();
7609 // Windows is LLP64 rather than LP64
7610 if (T.isOSWindows() && T.isArch64Bit())
7611 return UnsignedLongLongTy;
7612 return UnsignedLongTy;
7613}
7614
7615CanQualType ASTContext::getNSIntegerType() const {
7616 assert(Target && "Expected target to be initialized");
7617 const llvm::Triple &T = Target->getTriple();
7618 // Windows is LLP64 rather than LP64
7619 if (T.isOSWindows() && T.isArch64Bit())
7620 return LongLongTy;
7621 return LongTy;
7622}
7623
7624TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
7625 if (!ObjCInstanceTypeDecl)
7626 ObjCInstanceTypeDecl =
7627 buildImplicitTypedef(T: getObjCIdType(), Name: "instancetype");
7628 return ObjCInstanceTypeDecl;
7629}
7630
7631// This returns true if a type has been typedefed to BOOL:
7632// typedef <type> BOOL;
7633static bool isTypeTypedefedAsBOOL(QualType T) {
7634 if (const auto *TT = dyn_cast<TypedefType>(Val&: T))
7635 if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
7636 return II->isStr(Str: "BOOL");
7637
7638 return false;
7639}
7640
7641/// getObjCEncodingTypeSize returns size of type for objective-c encoding
7642/// purpose.
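/// For example, on a typical 64-bit target a 'short' argument is reported as
/// 4 bytes (widened to int) and any array argument as 8 bytes, since arrays
/// are passed as pointers.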
7643CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
7644 if (!type->isIncompleteArrayType() && type->isIncompleteType())
7645 return CharUnits::Zero();
7646
7647 CharUnits sz = getTypeSizeInChars(T: type);
7648
7649 // Make all integer and enum types at least as large as an int
7650 if (sz.isPositive() && type->isIntegralOrEnumerationType())
7651 sz = std::max(sz, getTypeSizeInChars(IntTy));
7652 // Treat arrays as pointers, since that's how they're passed in.
7653 else if (type->isArrayType())
7654 sz = getTypeSizeInChars(VoidPtrTy);
7655 return sz;
7656}
7657
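// In the Microsoft ABI, a static data member of integral or enumeration type
// with an in-class initializer (e.g. "struct S { static const int N = 4; };")
// behaves like an inline definition; this predicate detects that case.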
7658bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const {
7659 return getTargetInfo().getCXXABI().isMicrosoft() &&
7660 VD->isStaticDataMember() &&
7661 VD->getType()->isIntegralOrEnumerationType() &&
7662 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit();
7663}
7664
7665ASTContext::InlineVariableDefinitionKind
7666ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const {
7667 if (!VD->isInline())
7668 return InlineVariableDefinitionKind::None;
7669
7670 // In almost all cases, it's a weak definition.
7671 auto *First = VD->getFirstDecl();
7672 if (First->isInlineSpecified() || !First->isStaticDataMember())
7673 return InlineVariableDefinitionKind::Weak;
7674
7675 // If there's a file-context declaration in this translation unit, it's a
7676 // non-discardable definition.
7677 for (auto *D : VD->redecls())
7678 if (D->getLexicalDeclContext()->isFileContext() &&
7679 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr()))
7680 return InlineVariableDefinitionKind::Strong;
7681
7682 // If we've not seen one yet, we don't know.
7683 return InlineVariableDefinitionKind::WeakUnknown;
7684}
7685
7686static std::string charUnitsToString(const CharUnits &CU) {
7687 return llvm::itostr(X: CU.getQuantity());
7688}
7689
7690/// getObjCEncodingForBlock - Return the encoded type for this block
7691/// declaration.
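/// For illustration, on a 64-bit target (without extended block signatures) a
/// block of type "void (^)(int)" yields "v12@?0i8": void return, a 12-byte
/// argument frame, the block itself at offset 0, and the int at offset 8.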
7692std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
7693 std::string S;
7694
7695 const BlockDecl *Decl = Expr->getBlockDecl();
7696 QualType BlockTy =
7697 Expr->getType()->castAs<BlockPointerType>()->getPointeeType();
7698 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType();
7699 // Encode result type.
7700 if (getLangOpts().EncodeExtendedBlockSig)
7701 getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: BlockReturnTy, S,
7702 Extended: true /*Extended*/);
7703 else
7704 getObjCEncodingForType(T: BlockReturnTy, S);
7705 // Compute size of all parameters.
  // Start by computing the size of a pointer in bytes.
  // FIXME: There might (and should) be a better way of doing this computation!
7708 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
7709 CharUnits ParmOffset = PtrSize;
7710 for (auto *PI : Decl->parameters()) {
7711 QualType PType = PI->getType();
7712 CharUnits sz = getObjCEncodingTypeSize(type: PType);
7713 if (sz.isZero())
7714 continue;
7715 assert(sz.isPositive() && "BlockExpr - Incomplete param type");
7716 ParmOffset += sz;
7717 }
7718 // Size of the argument frame
7719 S += charUnitsToString(CU: ParmOffset);
7720 // Block pointer and offset.
7721 S += "@?0";
7722
7723 // Argument types.
7724 ParmOffset = PtrSize;
7725 for (auto *PVDecl : Decl->parameters()) {
7726 QualType PType = PVDecl->getOriginalType();
7727 if (const auto *AT =
7728 dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
7729 // Use array's original type only if it has known number of
7730 // elements.
7731 if (!isa<ConstantArrayType>(Val: AT))
7732 PType = PVDecl->getType();
7733 } else if (PType->isFunctionType())
7734 PType = PVDecl->getType();
7735 if (getLangOpts().EncodeExtendedBlockSig)
7736 getObjCEncodingForMethodParameter(QT: Decl::OBJC_TQ_None, T: PType,
7737 S, Extended: true /*Extended*/);
7738 else
7739 getObjCEncodingForType(T: PType, S);
7740 S += charUnitsToString(CU: ParmOffset);
7741 ParmOffset += getObjCEncodingTypeSize(type: PType);
7742 }
7743
7744 return S;
7745}
7746
7747std::string
7748ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
7749 std::string S;
7750 // Encode result type.
7751 getObjCEncodingForType(T: Decl->getReturnType(), S);
7752 CharUnits ParmOffset;
7753 // Compute size of all parameters.
7754 for (auto *PI : Decl->parameters()) {
7755 QualType PType = PI->getType();
7756 CharUnits sz = getObjCEncodingTypeSize(type: PType);
7757 if (sz.isZero())
7758 continue;
7759
7760 assert(sz.isPositive() &&
7761 "getObjCEncodingForFunctionDecl - Incomplete param type");
7762 ParmOffset += sz;
7763 }
7764 S += charUnitsToString(CU: ParmOffset);
7765 ParmOffset = CharUnits::Zero();
7766
7767 // Argument types.
7768 for (auto *PVDecl : Decl->parameters()) {
7769 QualType PType = PVDecl->getOriginalType();
7770 if (const auto *AT =
7771 dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
7772 // Use array's original type only if it has known number of
7773 // elements.
7774 if (!isa<ConstantArrayType>(Val: AT))
7775 PType = PVDecl->getType();
7776 } else if (PType->isFunctionType())
7777 PType = PVDecl->getType();
7778 getObjCEncodingForType(T: PType, S);
7779 S += charUnitsToString(CU: ParmOffset);
7780 ParmOffset += getObjCEncodingTypeSize(type: PType);
7781 }
7782
7783 return S;
7784}
7785
7786/// getObjCEncodingForMethodParameter - Return the encoded type for a single
7787/// method parameter or return type. If Extended, include class names and
7788/// block object types.
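/// For example, an 'inout' parameter of type int encodes as "Ni": the 'N'
/// qualifier prefix followed by the type code for int.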
7789void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
7790 QualType T, std::string& S,
7791 bool Extended) const {
7792 // Encode type qualifier, 'in', 'inout', etc. for the parameter.
7793 getObjCEncodingForTypeQualifier(QT, S);
7794 // Encode parameter type.
7795 ObjCEncOptions Options = ObjCEncOptions()
7796 .setExpandPointedToStructures()
7797 .setExpandStructures()
7798 .setIsOutermostType();
7799 if (Extended)
7800 Options.setEncodeBlockParameters().setEncodeClassNames();
7801 getObjCEncodingForTypeImpl(t: T, S, Options, /*Field=*/nullptr);
7802}
7803
7804/// getObjCEncodingForMethodDecl - Return the encoded type for this method
7805/// declaration.
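/// For illustration, on a 64-bit target "- (void)setValue:(int)v" encodes as
/// "v20@0:8i16": void return, a 20-byte frame, self at offset 0, _cmd at
/// offset 8, and the int argument at offset 16.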
7806std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
7807 bool Extended) const {
7808 // FIXME: This is not very efficient.
7809 // Encode return type.
7810 std::string S;
7811 getObjCEncodingForMethodParameter(QT: Decl->getObjCDeclQualifier(),
7812 T: Decl->getReturnType(), S, Extended);
7813 // Compute size of all parameters.
  // Start by computing the size of a pointer in bytes.
  // FIXME: There might (and should) be a better way of doing this computation!
7816 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
7817 // The first two arguments (self and _cmd) are pointers; account for
7818 // their size.
7819 CharUnits ParmOffset = 2 * PtrSize;
7820 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
7821 E = Decl->sel_param_end(); PI != E; ++PI) {
7822 QualType PType = (*PI)->getType();
7823 CharUnits sz = getObjCEncodingTypeSize(type: PType);
7824 if (sz.isZero())
7825 continue;
7826
7827 assert(sz.isPositive() &&
7828 "getObjCEncodingForMethodDecl - Incomplete param type");
7829 ParmOffset += sz;
7830 }
7831 S += charUnitsToString(CU: ParmOffset);
7832 S += "@0:";
7833 S += charUnitsToString(CU: PtrSize);
7834
7835 // Argument types.
7836 ParmOffset = 2 * PtrSize;
7837 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
7838 E = Decl->sel_param_end(); PI != E; ++PI) {
7839 const ParmVarDecl *PVDecl = *PI;
7840 QualType PType = PVDecl->getOriginalType();
7841 if (const auto *AT =
7842 dyn_cast<ArrayType>(Val: PType->getCanonicalTypeInternal())) {
7843 // Use array's original type only if it has known number of
7844 // elements.
7845 if (!isa<ConstantArrayType>(Val: AT))
7846 PType = PVDecl->getType();
7847 } else if (PType->isFunctionType())
7848 PType = PVDecl->getType();
7849 getObjCEncodingForMethodParameter(QT: PVDecl->getObjCDeclQualifier(),
7850 T: PType, S, Extended);
7851 S += charUnitsToString(CU: ParmOffset);
7852 ParmOffset += getObjCEncodingTypeSize(type: PType);
7853 }
7854
7855 return S;
7856}
7857
7858ObjCPropertyImplDecl *
7859ASTContext::getObjCPropertyImplDeclForPropertyDecl(
7860 const ObjCPropertyDecl *PD,
7861 const Decl *Container) const {
7862 if (!Container)
7863 return nullptr;
7864 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Val: Container)) {
7865 for (auto *PID : CID->property_impls())
7866 if (PID->getPropertyDecl() == PD)
7867 return PID;
7868 } else {
7869 const auto *OID = cast<ObjCImplementationDecl>(Val: Container);
7870 for (auto *PID : OID->property_impls())
7871 if (PID->getPropertyDecl() == PD)
7872 return PID;
7873 }
7874 return nullptr;
7875}
7876
7877/// getObjCEncodingForPropertyDecl - Return the encoded type for this
7878/// property declaration. If non-NULL, Container must be either an
7879/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
7880/// NULL when getting encodings for protocol properties.
7881/// Property attributes are stored as a comma-delimited C string. The simple
7882/// attributes readonly and bycopy are encoded as single characters. The
7883/// parametrized attributes, getter=name, setter=name, and ivar=name, are
7884/// encoded as single characters, followed by an identifier. Property types
7885/// are also encoded as a parametrized attribute. The characters used to encode
7886/// these attributes are defined by the following enumeration:
7887/// @code
7888/// enum PropertyAttributes {
7889/// kPropertyReadOnly = 'R', // property is read-only.
7890/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
7891/// kPropertyByref = '&', // property is a reference to the value last assigned
7892/// kPropertyDynamic = 'D', // property is dynamic
7893/// kPropertyGetter = 'G', // followed by getter selector name
7894/// kPropertySetter = 'S', // followed by setter selector name
7895/// kPropertyInstanceVariable = 'V' // followed by instance variable name
7896/// kPropertyType = 'T' // followed by old-style type encoding.
7897/// kPropertyWeak = 'W' // 'weak' property
7898/// kPropertyStrong = 'P' // property GC'able
7899/// kPropertyNonAtomic = 'N' // property non-atomic
7900/// kPropertyOptional = '?' // property optional
7901/// };
7902/// @endcode
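/// For illustration, "@property (nonatomic, copy) NSString *name;" backed by
/// a synthesized ivar "_name" encodes roughly as: T@"NSString",C,N,V_name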
7903std::string
7904ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
7905 const Decl *Container) const {
7906 // Collect information from the property implementation decl(s).
7907 bool Dynamic = false;
7908 ObjCPropertyImplDecl *SynthesizePID = nullptr;
7909
7910 if (ObjCPropertyImplDecl *PropertyImpDecl =
7911 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) {
7912 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
7913 Dynamic = true;
7914 else
7915 SynthesizePID = PropertyImpDecl;
7916 }
7917
7918 // FIXME: This is not very efficient.
7919 std::string S = "T";
7920
7921 // Encode result type.
  // GCC has some special rules regarding encoding of properties, which
  // closely resemble the encoding of ivars.
7924 getObjCEncodingForPropertyType(T: PD->getType(), S);
7925
7926 if (PD->isOptional())
7927 S += ",?";
7928
7929 if (PD->isReadOnly()) {
7930 S += ",R";
7931 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy)
7932 S += ",C";
7933 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain)
7934 S += ",&";
7935 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
7936 S += ",W";
7937 } else {
7938 switch (PD->getSetterKind()) {
7939 case ObjCPropertyDecl::Assign: break;
7940 case ObjCPropertyDecl::Copy: S += ",C"; break;
7941 case ObjCPropertyDecl::Retain: S += ",&"; break;
7942 case ObjCPropertyDecl::Weak: S += ",W"; break;
7943 }
7944 }
7945
7946 // It really isn't clear at all what this means, since properties
7947 // are "dynamic by default".
7948 if (Dynamic)
7949 S += ",D";
7950
7951 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic)
7952 S += ",N";
7953
7954 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) {
7955 S += ",G";
7956 S += PD->getGetterName().getAsString();
7957 }
7958
7959 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) {
7960 S += ",S";
7961 S += PD->getSetterName().getAsString();
7962 }
7963
7964 if (SynthesizePID) {
7965 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
7966 S += ",V";
7967 S += OID->getNameAsString();
7968 }
7969
7970 // FIXME: OBJCGC: weak & strong
7971 return S;
7972}
7973
/// getLegacyIntegralTypeEncoding -
/// Another legacy compatibility encoding: 32-bit longs are encoded as
/// 'l' or 'L', but not always. For typedefs, we need to use
/// 'i' or 'I' instead when encoding a struct field or a pointer.
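/// For example, a pointer to a typedef of 'long' on a target where long is
/// 32 bits wide encodes as "^i" rather than "^l".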
7978void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
7979 if (PointeeTy->getAs<TypedefType>()) {
7980 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) {
7981 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
7982 PointeeTy = UnsignedIntTy;
7983 else
7984 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
7985 PointeeTy = IntTy;
7986 }
7987 }
7988}
7989
7990void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
7991 const FieldDecl *Field,
7992 QualType *NotEncodedT) const {
7993 // We follow the behavior of gcc, expanding structures which are
7994 // directly pointed to, and expanding embedded structures. Note that
7995 // these rules are sufficient to prevent recursive encoding of the
7996 // same type.
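  // For example, @encode(struct S *) for "struct S { int x; }" produces
  // "^{S=i}", while @encode(struct S) produces "{S=i}".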
7997 getObjCEncodingForTypeImpl(t: T, S,
7998 Options: ObjCEncOptions()
7999 .setExpandPointedToStructures()
8000 .setExpandStructures()
8001 .setIsOutermostType(),
8002 Field, NotEncodedT);
8003}
8004
8005void ASTContext::getObjCEncodingForPropertyType(QualType T,
8006 std::string& S) const {
8007 // Encode result type.
  // GCC has some special rules regarding encoding of properties, which
  // closely resemble the encoding of ivars.
8010 getObjCEncodingForTypeImpl(t: T, S,
8011 Options: ObjCEncOptions()
8012 .setExpandPointedToStructures()
8013 .setExpandStructures()
8014 .setIsOutermostType()
8015 .setEncodingProperty(),
8016 /*Field=*/nullptr);
8017}
8018
8019static char getObjCEncodingForPrimitiveType(const ASTContext *C,
8020 const BuiltinType *BT) {
8021 BuiltinType::Kind kind = BT->getKind();
8022 switch (kind) {
8023 case BuiltinType::Void: return 'v';
8024 case BuiltinType::Bool: return 'B';
8025 case BuiltinType::Char8:
8026 case BuiltinType::Char_U:
8027 case BuiltinType::UChar: return 'C';
8028 case BuiltinType::Char16:
8029 case BuiltinType::UShort: return 'S';
8030 case BuiltinType::Char32:
8031 case BuiltinType::UInt: return 'I';
8032 case BuiltinType::ULong:
8033 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q';
8034 case BuiltinType::UInt128: return 'T';
8035 case BuiltinType::ULongLong: return 'Q';
8036 case BuiltinType::Char_S:
8037 case BuiltinType::SChar: return 'c';
8038 case BuiltinType::Short: return 's';
8039 case BuiltinType::WChar_S:
8040 case BuiltinType::WChar_U:
8041 case BuiltinType::Int: return 'i';
8042 case BuiltinType::Long:
8043 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q';
8044 case BuiltinType::LongLong: return 'q';
8045 case BuiltinType::Int128: return 't';
8046 case BuiltinType::Float: return 'f';
8047 case BuiltinType::Double: return 'd';
8048 case BuiltinType::LongDouble: return 'D';
8049 case BuiltinType::NullPtr: return '*'; // like char*
8050
8051 case BuiltinType::BFloat16:
8052 case BuiltinType::Float16:
8053 case BuiltinType::Float128:
8054 case BuiltinType::Ibm128:
8055 case BuiltinType::Half:
8056 case BuiltinType::ShortAccum:
8057 case BuiltinType::Accum:
8058 case BuiltinType::LongAccum:
8059 case BuiltinType::UShortAccum:
8060 case BuiltinType::UAccum:
8061 case BuiltinType::ULongAccum:
8062 case BuiltinType::ShortFract:
8063 case BuiltinType::Fract:
8064 case BuiltinType::LongFract:
8065 case BuiltinType::UShortFract:
8066 case BuiltinType::UFract:
8067 case BuiltinType::ULongFract:
8068 case BuiltinType::SatShortAccum:
8069 case BuiltinType::SatAccum:
8070 case BuiltinType::SatLongAccum:
8071 case BuiltinType::SatUShortAccum:
8072 case BuiltinType::SatUAccum:
8073 case BuiltinType::SatULongAccum:
8074 case BuiltinType::SatShortFract:
8075 case BuiltinType::SatFract:
8076 case BuiltinType::SatLongFract:
8077 case BuiltinType::SatUShortFract:
8078 case BuiltinType::SatUFract:
8079 case BuiltinType::SatULongFract:
8080 // FIXME: potentially need @encodes for these!
8081 return ' ';
8082
8083#define SVE_TYPE(Name, Id, SingletonId) \
8084 case BuiltinType::Id:
8085#include "clang/Basic/AArch64SVEACLETypes.def"
8086#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
8087#include "clang/Basic/RISCVVTypes.def"
8088#define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
8089#include "clang/Basic/WebAssemblyReferenceTypes.def"
8090 {
8091 DiagnosticsEngine &Diags = C->getDiagnostics();
8092 unsigned DiagID = Diags.getCustomDiagID(L: DiagnosticsEngine::Error,
8093 FormatString: "cannot yet @encode type %0");
8094 Diags.Report(DiagID) << BT->getName(Policy: C->getPrintingPolicy());
8095 return ' ';
8096 }
8097
8098 case BuiltinType::ObjCId:
8099 case BuiltinType::ObjCClass:
8100 case BuiltinType::ObjCSel:
8101 llvm_unreachable("@encoding ObjC primitive type");
8102
8103 // OpenCL and placeholder types don't need @encodings.
8104#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
8105 case BuiltinType::Id:
8106#include "clang/Basic/OpenCLImageTypes.def"
8107#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
8108 case BuiltinType::Id:
8109#include "clang/Basic/OpenCLExtensionTypes.def"
8110 case BuiltinType::OCLEvent:
8111 case BuiltinType::OCLClkEvent:
8112 case BuiltinType::OCLQueue:
8113 case BuiltinType::OCLReserveID:
8114 case BuiltinType::OCLSampler:
8115 case BuiltinType::Dependent:
8116#define PPC_VECTOR_TYPE(Name, Id, Size) \
8117 case BuiltinType::Id:
8118#include "clang/Basic/PPCTypes.def"
8119#define BUILTIN_TYPE(KIND, ID)
8120#define PLACEHOLDER_TYPE(KIND, ID) \
8121 case BuiltinType::KIND:
8122#include "clang/AST/BuiltinTypes.def"
8123 llvm_unreachable("invalid builtin type for @encode");
8124 }
8125 llvm_unreachable("invalid BuiltinType::Kind value");
8126}
8127
8128static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
8129 EnumDecl *Enum = ET->getDecl();
8130
  // The encoding of a non-fixed enum type is always 'i', regardless of size.
8132 if (!Enum->isFixed())
8133 return 'i';
8134
8135 // The encoding of a fixed enum type matches its fixed underlying type.
8136 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
8137 return getObjCEncodingForPrimitiveType(C, BT);
8138}
8139
8140static void EncodeBitField(const ASTContext *Ctx, std::string& S,
8141 QualType T, const FieldDecl *FD) {
8142 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
8143 S += 'b';
8144 // The NeXT runtime encodes bit fields as b followed by the number of bits.
8145 // The GNU runtime requires more information; bitfields are encoded as b,
8146 // then the offset (in bits) of the first element, then the type of the
8147 // bitfield, then the size in bits. For example, in this structure:
8148 //
8149 // struct
8150 // {
8151 // int integer;
8152 // int flags:2;
8153 // };
8154 // On a 32-bit system, the encoding for flags would be b2 for the NeXT
8155 // runtime, but b32i2 for the GNU runtime. The reason for this extra
8156 // information is not especially sensible, but we're stuck with it for
8157 // compatibility with GCC, although providing it breaks anything that
8158 // actually uses runtime introspection and wants to work on both runtimes...
8159 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
8160 uint64_t Offset;
8161
8162 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(Val: FD)) {
8163 Offset = Ctx->lookupFieldBitOffset(OID: IVD->getContainingInterface(), ID: nullptr,
8164 Ivar: IVD);
8165 } else {
8166 const RecordDecl *RD = FD->getParent();
8167 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(D: RD);
8168 Offset = RL.getFieldOffset(FieldNo: FD->getFieldIndex());
8169 }
8170
8171 S += llvm::utostr(X: Offset);
8172
8173 if (const auto *ET = T->getAs<EnumType>())
8174 S += ObjCEncodingForEnumType(C: Ctx, ET);
8175 else {
8176 const auto *BT = T->castAs<BuiltinType>();
8177 S += getObjCEncodingForPrimitiveType(C: Ctx, BT);
8178 }
8179 }
8180 S += llvm::utostr(X: FD->getBitWidthValue(Ctx: *Ctx));
8181}
8182
8183// Helper function for determining whether the encoded type string would include
8184// a template specialization type.
8185static bool hasTemplateSpecializationInEncodedString(const Type *T,
8186 bool VisitBasesAndFields) {
8187 T = T->getBaseElementTypeUnsafe();
8188
8189 if (auto *PT = T->getAs<PointerType>())
8190 return hasTemplateSpecializationInEncodedString(
8191 T: PT->getPointeeType().getTypePtr(), VisitBasesAndFields: false);
8192
8193 auto *CXXRD = T->getAsCXXRecordDecl();
8194
8195 if (!CXXRD)
8196 return false;
8197
8198 if (isa<ClassTemplateSpecializationDecl>(Val: CXXRD))
8199 return true;
8200
8201 if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
8202 return false;
8203
8204 for (const auto &B : CXXRD->bases())
8205 if (hasTemplateSpecializationInEncodedString(T: B.getType().getTypePtr(),
8206 VisitBasesAndFields: true))
8207 return true;
8208
8209 for (auto *FD : CXXRD->fields())
8210 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
8211 true))
8212 return true;
8213
8214 return false;
8215}
8216
8217// FIXME: Use SmallString for accumulating string.
8218void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
8219 const ObjCEncOptions Options,
8220 const FieldDecl *FD,
8221 QualType *NotEncodedT) const {
8222 CanQualType CT = getCanonicalType(T);
8223 switch (CT->getTypeClass()) {
8224 case Type::Builtin:
8225 case Type::Enum:
8226 if (FD && FD->isBitField())
8227 return EncodeBitField(Ctx: this, S, T, FD);
8228 if (const auto *BT = dyn_cast<BuiltinType>(Val&: CT))
8229 S += getObjCEncodingForPrimitiveType(C: this, BT);
8230 else
8231 S += ObjCEncodingForEnumType(C: this, ET: cast<EnumType>(Val&: CT));
8232 return;
8233
8234 case Type::Complex:
8235 S += 'j';
8236 getObjCEncodingForTypeImpl(T: T->castAs<ComplexType>()->getElementType(), S,
8237 Options: ObjCEncOptions(),
8238 /*Field=*/FD: nullptr);
8239 return;
8240
8241 case Type::Atomic:
8242 S += 'A';
8243 getObjCEncodingForTypeImpl(T: T->castAs<AtomicType>()->getValueType(), S,
8244 Options: ObjCEncOptions(),
8245 /*Field=*/FD: nullptr);
8246 return;
8247
8248 // encoding for pointer or reference types.
8249 case Type::Pointer:
8250 case Type::LValueReference:
8251 case Type::RValueReference: {
8252 QualType PointeeTy;
8253 if (isa<PointerType>(Val: CT)) {
8254 const auto *PT = T->castAs<PointerType>();
8255 if (PT->isObjCSelType()) {
8256 S += ':';
8257 return;
8258 }
8259 PointeeTy = PT->getPointeeType();
8260 } else {
8261 PointeeTy = T->castAs<ReferenceType>()->getPointeeType();
8262 }
8263
8264 bool isReadOnly = false;
8265 // For historical/compatibility reasons, the read-only qualifier of the
8266 // pointee gets emitted _before_ the '^'. The read-only qualifier of
8267 // the pointer itself gets ignored, _unless_ we are looking at a typedef!
8268 // Also, do not emit the 'r' for anything but the outermost type!
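    // For example, @encode(const char *) yields "r*"; the 'r' is emitted only
    // because the const-qualified pointee appears at the outermost level.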
8269 if (T->getAs<TypedefType>()) {
8270 if (Options.IsOutermostType() && T.isConstQualified()) {
8271 isReadOnly = true;
8272 S += 'r';
8273 }
8274 } else if (Options.IsOutermostType()) {
8275 QualType P = PointeeTy;
8276 while (auto PT = P->getAs<PointerType>())
8277 P = PT->getPointeeType();
8278 if (P.isConstQualified()) {
8279 isReadOnly = true;
8280 S += 'r';
8281 }
8282 }
8283 if (isReadOnly) {
8284 // Another legacy compatibility encoding. Some ObjC qualifier and type
8285 // combinations need to be rearranged.
8286 // Rewrite "in const" from "nr" to "rn"
8287 if (StringRef(S).ends_with(Suffix: "nr"))
8288 S.replace(i1: S.end()-2, i2: S.end(), s: "rn");
8289 }
8290
8291 if (PointeeTy->isCharType()) {
8292 // char pointer types should be encoded as '*' unless it is a
8293 // type that has been typedef'd to 'BOOL'.
8294 if (!isTypeTypedefedAsBOOL(T: PointeeTy)) {
8295 S += '*';
8296 return;
8297 }
8298 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) {
8299 // GCC binary compat: Need to convert "struct objc_class *" to "#".
8300 if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_class")) {
8301 S += '#';
8302 return;
8303 }
8304 // GCC binary compat: Need to convert "struct objc_object *" to "@".
8305 if (RTy->getDecl()->getIdentifier() == &Idents.get(Name: "objc_object")) {
8306 S += '@';
8307 return;
8308 }
8309 // If the encoded string for the class includes template names, just emit
8310 // "^v" for pointers to the class.
8311 if (getLangOpts().CPlusPlus &&
8312 (!getLangOpts().EncodeCXXClassTemplateSpec &&
8313 hasTemplateSpecializationInEncodedString(
8314 RTy, Options.ExpandPointedToStructures()))) {
8315 S += "^v";
8316 return;
8317 }
8318 // fall through...
8319 }
8320 S += '^';
8321 getLegacyIntegralTypeEncoding(PointeeTy);
8322
8323 ObjCEncOptions NewOptions;
8324 if (Options.ExpandPointedToStructures())
8325 NewOptions.setExpandStructures();
8326 getObjCEncodingForTypeImpl(T: PointeeTy, S, Options: NewOptions,
8327 /*Field=*/FD: nullptr, NotEncodedT);
8328 return;
8329 }
8330
8331 case Type::ConstantArray:
8332 case Type::IncompleteArray:
8333 case Type::VariableArray: {
8334 const auto *AT = cast<ArrayType>(Val&: CT);
8335
8336 if (isa<IncompleteArrayType>(Val: AT) && !Options.IsStructField()) {
8337 // Incomplete arrays are encoded as a pointer to the array element.
8338 S += '^';
8339
8340 getObjCEncodingForTypeImpl(
8341 T: AT->getElementType(), S,
8342 Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD);
8343 } else {
8344 S += '[';
8345
8346 if (const auto *CAT = dyn_cast<ConstantArrayType>(Val: AT))
8347 S += llvm::utostr(X: CAT->getSize().getZExtValue());
8348 else {
        // Variable length arrays are encoded as a regular array with 0 elements.
8350 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
8351 "Unknown array type!");
8352 S += '0';
8353 }
8354
8355 getObjCEncodingForTypeImpl(
8356 T: AT->getElementType(), S,
8357 Options: Options.keepingOnly(Mask: ObjCEncOptions().setExpandStructures()), FD,
8358 NotEncodedT);
8359 S += ']';
8360 }
8361 return;
8362 }
8363
8364 case Type::FunctionNoProto:
8365 case Type::FunctionProto:
8366 S += '?';
8367 return;
8368
8369 case Type::Record: {
8370 RecordDecl *RDecl = cast<RecordType>(Val&: CT)->getDecl();
8371 S += RDecl->isUnion() ? '(' : '{';
8372 // Anonymous structures print as '?'
8373 if (const IdentifierInfo *II = RDecl->getIdentifier()) {
8374 S += II->getName();
8375 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Val: RDecl)) {
8376 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
8377 llvm::raw_string_ostream OS(S);
8378 printTemplateArgumentList(OS, Args: TemplateArgs.asArray(),
8379 Policy: getPrintingPolicy());
8380 }
8381 } else {
8382 S += '?';
8383 }
8384 if (Options.ExpandStructures()) {
8385 S += '=';
8386 if (!RDecl->isUnion()) {
8387 getObjCEncodingForStructureImpl(RD: RDecl, S, Field: FD, includeVBases: true, NotEncodedT);
8388 } else {
8389 for (const auto *Field : RDecl->fields()) {
8390 if (FD) {
8391 S += '"';
8392 S += Field->getNameAsString();
8393 S += '"';
8394 }
8395
8396 // Special case bit-fields.
8397 if (Field->isBitField()) {
8398 getObjCEncodingForTypeImpl(T: Field->getType(), S,
8399 Options: ObjCEncOptions().setExpandStructures(),
8400 FD: Field);
8401 } else {
8402 QualType qt = Field->getType();
8403 getLegacyIntegralTypeEncoding(PointeeTy&: qt);
8404 getObjCEncodingForTypeImpl(
8405 T: qt, S,
8406 Options: ObjCEncOptions().setExpandStructures().setIsStructField(), FD,
8407 NotEncodedT);
8408 }
8409 }
8410 }
8411 }
8412 S += RDecl->isUnion() ? ')' : '}';
8413 return;
8414 }
8415
8416 case Type::BlockPointer: {
8417 const auto *BT = T->castAs<BlockPointerType>();
8418 S += "@?"; // Unlike a pointer-to-function, which is "^?".
8419 if (Options.EncodeBlockParameters()) {
8420 const auto *FT = BT->getPointeeType()->castAs<FunctionType>();
8421
8422 S += '<';
8423 // Block return type
8424 getObjCEncodingForTypeImpl(T: FT->getReturnType(), S,
8425 Options: Options.forComponentType(), FD, NotEncodedT);
8426 // Block self
8427 S += "@?";
8428 // Block parameters
8429 if (const auto *FPT = dyn_cast<FunctionProtoType>(Val: FT)) {
8430 for (const auto &I : FPT->param_types())
8431 getObjCEncodingForTypeImpl(T: I, S, Options: Options.forComponentType(), FD,
8432 NotEncodedT);
8433 }
8434 S += '>';
8435 }
8436 return;
8437 }
8438
8439 case Type::ObjCObject: {
8440 // hack to match legacy encoding of *id and *Class
8441 QualType Ty = getObjCObjectPointerType(ObjectT: CT);
8442 if (Ty->isObjCIdType()) {
8443 S += "{objc_object=}";
8444 return;
8445 }
8446 else if (Ty->isObjCClassType()) {
8447 S += "{objc_class=}";
8448 return;
8449 }
8450 // TODO: Double check to make sure this intentionally falls through.
8451 [[fallthrough]];
8452 }
8453
8454 case Type::ObjCInterface: {
8455 // Ignore protocol qualifiers when mangling at this level.
8456 // @encode(class_name)
8457 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
8458 S += '{';
8459 S += OI->getObjCRuntimeNameAsString();
8460 if (Options.ExpandStructures()) {
8461 S += '=';
8462 SmallVector<const ObjCIvarDecl*, 32> Ivars;
8463 DeepCollectObjCIvars(OI, leafClass: true, Ivars);
8464 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
8465 const FieldDecl *Field = Ivars[i];
8466 if (Field->isBitField())
8467 getObjCEncodingForTypeImpl(T: Field->getType(), S,
8468 Options: ObjCEncOptions().setExpandStructures(),
8469 FD: Field);
8470 else
8471 getObjCEncodingForTypeImpl(T: Field->getType(), S,
8472 Options: ObjCEncOptions().setExpandStructures(), FD,
8473 NotEncodedT);
8474 }
8475 }
8476 S += '}';
8477 return;
8478 }
8479
8480 case Type::ObjCObjectPointer: {
8481 const auto *OPT = T->castAs<ObjCObjectPointerType>();
8482 if (OPT->isObjCIdType()) {
8483 S += '@';
8484 return;
8485 }
8486
8487 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
8488 // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
8489 // Since this is a binary compatibility issue, need to consult with
8490 // runtime folks. Fortunately, this is a *very* obscure construct.
8491 S += '#';
8492 return;
8493 }
8494
8495 if (OPT->isObjCQualifiedIdType()) {
8496 getObjCEncodingForTypeImpl(
8497 T: getObjCIdType(), S,
8498 Options: Options.keepingOnly(Mask: ObjCEncOptions()
8499 .setExpandPointedToStructures()
8500 .setExpandStructures()),
8501 FD);
8502 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) {
8503 // Note that we do extended encoding of protocol qualifier list
8504 // Only when doing ivar or property encoding.
8505 S += '"';
8506 for (const auto *I : OPT->quals()) {
8507 S += '<';
8508 S += I->getObjCRuntimeNameAsString();
8509 S += '>';
8510 }
8511 S += '"';
8512 }
8513 return;
8514 }
8515
8516 S += '@';
8517 if (OPT->getInterfaceDecl() &&
8518 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) {
8519 S += '"';
8520 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString();
8521 for (const auto *I : OPT->quals()) {
8522 S += '<';
8523 S += I->getObjCRuntimeNameAsString();
8524 S += '>';
8525 }
8526 S += '"';
8527 }
8528 return;
8529 }
8530
8531 // gcc just blithely ignores member pointers.
8532 // FIXME: we should do better than that. 'M' is available.
8533 case Type::MemberPointer:
8534 // This matches gcc's encoding, even though technically it is insufficient.
  // FIXME: We should do a better job than gcc.
8536 case Type::Vector:
8537 case Type::ExtVector:
8538 // Until we have a coherent encoding of these three types, issue warning.
8539 if (NotEncodedT)
8540 *NotEncodedT = T;
8541 return;
8542
8543 case Type::ConstantMatrix:
8544 if (NotEncodedT)
8545 *NotEncodedT = T;
8546 return;
8547
8548 case Type::BitInt:
8549 if (NotEncodedT)
8550 *NotEncodedT = T;
8551 return;
8552
8553 // We could see an undeduced auto type here during error recovery.
8554 // Just ignore it.
8555 case Type::Auto:
8556 case Type::DeducedTemplateSpecialization:
8557 return;
8558
8559 case Type::Pipe:
8560#define ABSTRACT_TYPE(KIND, BASE)
8561#define TYPE(KIND, BASE)
8562#define DEPENDENT_TYPE(KIND, BASE) \
8563 case Type::KIND:
8564#define NON_CANONICAL_TYPE(KIND, BASE) \
8565 case Type::KIND:
8566#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \
8567 case Type::KIND:
8568#include "clang/AST/TypeNodes.inc"
8569 llvm_unreachable("@encode for dependent type!");
8570 }
8571 llvm_unreachable("bad type kind!");
8572}
8573
8574void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
8575 std::string &S,
8576 const FieldDecl *FD,
8577 bool includeVBases,
8578 QualType *NotEncodedT) const {
8579 assert(RDecl && "Expected non-null RecordDecl");
8580 assert(!RDecl->isUnion() && "Should not be called for unions");
8581 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl())
8582 return;
8583
8584 const auto *CXXRec = dyn_cast<CXXRecordDecl>(Val: RDecl);
8585 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
8586 const ASTRecordLayout &layout = getASTRecordLayout(D: RDecl);
8587
8588 if (CXXRec) {
8589 for (const auto &BI : CXXRec->bases()) {
8590 if (!BI.isVirtual()) {
8591 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
8592 if (base->isEmpty())
8593 continue;
8594 uint64_t offs = toBits(CharSize: layout.getBaseClassOffset(Base: base));
8595 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(x: offs),
8596 std::make_pair(x&: offs, y&: base));
8597 }
8598 }
8599 }
8600
8601 for (FieldDecl *Field : RDecl->fields()) {
8602 if (!Field->isZeroLengthBitField(Ctx: *this) && Field->isZeroSize(Ctx: *this))
8603 continue;
8604 uint64_t offs = layout.getFieldOffset(FieldNo: Field->getFieldIndex());
8605 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(x: offs),
8606 std::make_pair(x&: offs, y&: Field));
8607 }
8608
8609 if (CXXRec && includeVBases) {
8610 for (const auto &BI : CXXRec->vbases()) {
8611 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl();
8612 if (base->isEmpty())
8613 continue;
8614 uint64_t offs = toBits(CharSize: layout.getVBaseClassOffset(VBase: base));
8615 if (offs >= uint64_t(toBits(CharSize: layout.getNonVirtualSize())) &&
8616 FieldOrBaseOffsets.find(x: offs) == FieldOrBaseOffsets.end())
8617 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
8618 std::make_pair(x&: offs, y&: base));
8619 }
8620 }
8621
8622 CharUnits size;
8623 if (CXXRec) {
8624 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
8625 } else {
8626 size = layout.getSize();
8627 }
8628
8629#ifndef NDEBUG
8630 uint64_t CurOffs = 0;
8631#endif
8632 std::multimap<uint64_t, NamedDecl *>::iterator
8633 CurLayObj = FieldOrBaseOffsets.begin();
8634
8635 if (CXXRec && CXXRec->isDynamicClass() &&
8636 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
8637 if (FD) {
8638 S += "\"_vptr$";
8639 std::string recname = CXXRec->getNameAsString();
8640 if (recname.empty()) recname = "?";
8641 S += recname;
8642 S += '"';
8643 }
8644 S += "^^?";
8645#ifndef NDEBUG
8646 CurOffs += getTypeSize(VoidPtrTy);
8647#endif
8648 }
8649
8650 if (!RDecl->hasFlexibleArrayMember()) {
8651 // Mark the end of the structure.
8652 uint64_t offs = toBits(CharSize: size);
8653 FieldOrBaseOffsets.insert(position: FieldOrBaseOffsets.upper_bound(x: offs),
8654 x: std::make_pair(x&: offs, y: nullptr));
8655 }
8656
8657 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
8658#ifndef NDEBUG
8659 assert(CurOffs <= CurLayObj->first);
8660 if (CurOffs < CurLayObj->first) {
8661 uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just considering the size of types without
      // taking alignment into account, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then, though.
8669 CurOffs += padding;
8670 }
8671#endif
8672
8673 NamedDecl *dcl = CurLayObj->second;
8674 if (!dcl)
8675 break; // reached end of structure.
8676
8677 if (auto *base = dyn_cast<CXXRecordDecl>(Val: dcl)) {
8678 // We expand the bases without their virtual bases since those are going
8679 // in the initial structure. Note that this differs from gcc which
8680 // expands virtual bases each time one is encountered in the hierarchy,
8681 // making the encoding type bigger than it really is.
8682 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false,
8683 NotEncodedT);
8684 assert(!base->isEmpty());
8685#ifndef NDEBUG
8686 CurOffs += toBits(CharSize: getASTRecordLayout(base).getNonVirtualSize());
8687#endif
8688 } else {
8689 const auto *field = cast<FieldDecl>(Val: dcl);
8690 if (FD) {
8691 S += '"';
8692 S += field->getNameAsString();
8693 S += '"';
8694 }
8695
8696 if (field->isBitField()) {
8697 EncodeBitField(this, S, field->getType(), field);
8698#ifndef NDEBUG
8699 CurOffs += field->getBitWidthValue(Ctx: *this);
8700#endif
8701 } else {
8702 QualType qt = field->getType();
8703 getLegacyIntegralTypeEncoding(PointeeTy&: qt);
8704 getObjCEncodingForTypeImpl(
8705 T: qt, S, Options: ObjCEncOptions().setExpandStructures().setIsStructField(),
8706 FD, NotEncodedT);
8707#ifndef NDEBUG
8708 CurOffs += getTypeSize(field->getType());
8709#endif
8710 }
8711 }
8712 }
8713}
8714
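// Map Objective-C parameter qualifiers onto their single-character encodings;
// for example, an "inout bycopy" parameter contributes "NO" ahead of its type
// code.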
8715void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
8716 std::string& S) const {
8717 if (QT & Decl::OBJC_TQ_In)
8718 S += 'n';
8719 if (QT & Decl::OBJC_TQ_Inout)
8720 S += 'N';
8721 if (QT & Decl::OBJC_TQ_Out)
8722 S += 'o';
8723 if (QT & Decl::OBJC_TQ_Bycopy)
8724 S += 'O';
8725 if (QT & Decl::OBJC_TQ_Byref)
8726 S += 'R';
8727 if (QT & Decl::OBJC_TQ_Oneway)
8728 S += 'V';
8729}
8730
8731TypedefDecl *ASTContext::getObjCIdDecl() const {
8732 if (!ObjCIdDecl) {
8733 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {});
8734 T = getObjCObjectPointerType(ObjectT: T);
8735 ObjCIdDecl = buildImplicitTypedef(T, Name: "id");
8736 }
8737 return ObjCIdDecl;
8738}
8739
8740TypedefDecl *ASTContext::getObjCSelDecl() const {
8741 if (!ObjCSelDecl) {
8742 QualType T = getPointerType(ObjCBuiltinSelTy);
8743 ObjCSelDecl = buildImplicitTypedef(T, Name: "SEL");
8744 }
8745 return ObjCSelDecl;
8746}
8747
8748TypedefDecl *ASTContext::getObjCClassDecl() const {
8749 if (!ObjCClassDecl) {
8750 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {});
8751 T = getObjCObjectPointerType(ObjectT: T);
8752 ObjCClassDecl = buildImplicitTypedef(T, Name: "Class");
8753 }
8754 return ObjCClassDecl;
8755}
8756
8757ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
8758 if (!ObjCProtocolClassDecl) {
8759 ObjCProtocolClassDecl
8760 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
8761 SourceLocation(),
8762 &Idents.get(Name: "Protocol"),
8763 /*typeParamList=*/nullptr,
8764 /*PrevDecl=*/nullptr,
8765 SourceLocation(), true);
8766 }
8767
8768 return ObjCProtocolClassDecl;
8769}
8770
8771//===----------------------------------------------------------------------===//
8772// __builtin_va_list Construction Functions
8773//===----------------------------------------------------------------------===//
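// Each target picks one of these layouts via
// TargetInfo::getBuiltinVaListKind(); for example, x86-64 System V uses the
// single-element __va_list_tag array built by CreateX86_64ABIBuiltinVaListDecl
// below.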
8774
8775static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context,
8776 StringRef Name) {
8777 // typedef char* __builtin[_ms]_va_list;
8778 QualType T = Context->getPointerType(Context->CharTy);
8779 return Context->buildImplicitTypedef(T, Name);
8780}
8781
8782static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) {
8783 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_ms_va_list");
8784}
8785
8786static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) {
8787 return CreateCharPtrNamedVaListDecl(Context, Name: "__builtin_va_list");
8788}
8789
8790static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
8791 // typedef void* __builtin_va_list;
8792 QualType T = Context->getPointerType(Context->VoidTy);
8793 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
8794}
8795
8796static TypedefDecl *
8797CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
8798 // struct __va_list
8799 RecordDecl *VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list");
8800 if (Context->getLangOpts().CPlusPlus) {
8801 // namespace std { struct __va_list {
8802 auto *NS = NamespaceDecl::Create(
8803 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
8804 /*Inline=*/false, SourceLocation(), SourceLocation(),
8805 &Context->Idents.get(Name: "std"),
8806 /*PrevDecl=*/nullptr, /*Nested=*/false);
8807 NS->setImplicit();
8808 VaListTagDecl->setDeclContext(NS);
8809 }
8810
8811 VaListTagDecl->startDefinition();
8812
8813 const size_t NumFields = 5;
8814 QualType FieldTypes[NumFields];
8815 const char *FieldNames[NumFields];
8816
8817 // void *__stack;
8818 FieldTypes[0] = Context->getPointerType(Context->VoidTy);
8819 FieldNames[0] = "__stack";
8820
8821 // void *__gr_top;
8822 FieldTypes[1] = Context->getPointerType(Context->VoidTy);
8823 FieldNames[1] = "__gr_top";
8824
8825 // void *__vr_top;
8826 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8827 FieldNames[2] = "__vr_top";
8828
8829 // int __gr_offs;
8830 FieldTypes[3] = Context->IntTy;
8831 FieldNames[3] = "__gr_offs";
8832
8833 // int __vr_offs;
8834 FieldTypes[4] = Context->IntTy;
8835 FieldNames[4] = "__vr_offs";
8836
8837 // Create fields
8838 for (unsigned i = 0; i < NumFields; ++i) {
8839 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8840 VaListTagDecl,
8841 SourceLocation(),
8842 SourceLocation(),
8843 &Context->Idents.get(Name: FieldNames[i]),
8844 FieldTypes[i], /*TInfo=*/nullptr,
8845 /*BitWidth=*/nullptr,
8846 /*Mutable=*/false,
8847 ICIS_NoInit);
8848 Field->setAccess(AS_public);
8849 VaListTagDecl->addDecl(Field);
8850 }
8851 VaListTagDecl->completeDefinition();
8852 Context->VaListTagDecl = VaListTagDecl;
8853 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
8854
8855 // } __builtin_va_list;
8856 return Context->buildImplicitTypedef(T: VaListTagType, Name: "__builtin_va_list");
8857}
8858
8859static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) {
8860 // typedef struct __va_list_tag {
8861 RecordDecl *VaListTagDecl;
8862
8863 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
8864 VaListTagDecl->startDefinition();
8865
8866 const size_t NumFields = 5;
8867 QualType FieldTypes[NumFields];
8868 const char *FieldNames[NumFields];
8869
8870 // unsigned char gpr;
8871 FieldTypes[0] = Context->UnsignedCharTy;
8872 FieldNames[0] = "gpr";
8873
8874 // unsigned char fpr;
8875 FieldTypes[1] = Context->UnsignedCharTy;
8876 FieldNames[1] = "fpr";
8877
8878 // unsigned short reserved;
8879 FieldTypes[2] = Context->UnsignedShortTy;
8880 FieldNames[2] = "reserved";
8881
8882 // void* overflow_arg_area;
8883 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
8884 FieldNames[3] = "overflow_arg_area";
8885
8886 // void* reg_save_area;
8887 FieldTypes[4] = Context->getPointerType(Context->VoidTy);
8888 FieldNames[4] = "reg_save_area";
8889
8890 // Create fields
8891 for (unsigned i = 0; i < NumFields; ++i) {
8892 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl,
8893 SourceLocation(),
8894 SourceLocation(),
8895 &Context->Idents.get(Name: FieldNames[i]),
8896 FieldTypes[i], /*TInfo=*/nullptr,
8897 /*BitWidth=*/nullptr,
8898 /*Mutable=*/false,
8899 ICIS_NoInit);
8900 Field->setAccess(AS_public);
8901 VaListTagDecl->addDecl(Field);
8902 }
8903 VaListTagDecl->completeDefinition();
8904 Context->VaListTagDecl = VaListTagDecl;
8905 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
8906
8907 // } __va_list_tag;
8908 TypedefDecl *VaListTagTypedefDecl =
8909 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
8910
8911 QualType VaListTagTypedefType =
8912 Context->getTypedefType(VaListTagTypedefDecl);
8913
8914 // typedef __va_list_tag __builtin_va_list[1];
8915 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
8916 QualType VaListTagArrayType = Context->getConstantArrayType(
8917 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
8918 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
8919}
8920
8921static TypedefDecl *
8922CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
8923 // struct __va_list_tag {
8924 RecordDecl *VaListTagDecl;
8925 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
8926 VaListTagDecl->startDefinition();
8927
8928 const size_t NumFields = 4;
8929 QualType FieldTypes[NumFields];
8930 const char *FieldNames[NumFields];
8931
8932 // unsigned gp_offset;
8933 FieldTypes[0] = Context->UnsignedIntTy;
8934 FieldNames[0] = "gp_offset";
8935
8936 // unsigned fp_offset;
8937 FieldTypes[1] = Context->UnsignedIntTy;
8938 FieldNames[1] = "fp_offset";
8939
8940 // void* overflow_arg_area;
8941 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
8942 FieldNames[2] = "overflow_arg_area";
8943
8944 // void* reg_save_area;
8945 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
8946 FieldNames[3] = "reg_save_area";
8947
8948 // Create fields
8949 for (unsigned i = 0; i < NumFields; ++i) {
8950 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
8951 VaListTagDecl,
8952 SourceLocation(),
8953 SourceLocation(),
8954 &Context->Idents.get(Name: FieldNames[i]),
8955 FieldTypes[i], /*TInfo=*/nullptr,
8956 /*BitWidth=*/nullptr,
8957 /*Mutable=*/false,
8958 ICIS_NoInit);
8959 Field->setAccess(AS_public);
8960 VaListTagDecl->addDecl(Field);
8961 }
8962 VaListTagDecl->completeDefinition();
8963 Context->VaListTagDecl = VaListTagDecl;
8964 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
8965
8966 // };
8967
8968 // typedef struct __va_list_tag __builtin_va_list[1];
8969 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
8970 QualType VaListTagArrayType = Context->getConstantArrayType(
8971 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
8972 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
8973}
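// Pieced together, the declaration built above amounts to (a sketch of the
// x86-64 SysV form):
//
//   struct __va_list_tag {
//     unsigned gp_offset;
//     unsigned fp_offset;
//     void *overflow_arg_area;
//     void *reg_save_area;
//   };
//   typedef struct __va_list_tag __builtin_va_list[1];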
8974
8975static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
8976 // typedef int __builtin_va_list[4];
8977 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 4);
8978 QualType IntArrayType = Context->getConstantArrayType(
8979 EltTy: Context->IntTy, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
8980 return Context->buildImplicitTypedef(T: IntArrayType, Name: "__builtin_va_list");
8981}
8982
8983static TypedefDecl *
8984CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) {
8985 // struct __va_list
8986 RecordDecl *VaListDecl = Context->buildImplicitRecord(Name: "__va_list");
8987 if (Context->getLangOpts().CPlusPlus) {
8988 // namespace std { struct __va_list {
8989 NamespaceDecl *NS;
8990 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
8991 Context->getTranslationUnitDecl(),
8992 /*Inline=*/false, SourceLocation(),
8993 SourceLocation(), &Context->Idents.get(Name: "std"),
8994 /*PrevDecl=*/nullptr, /*Nested=*/false);
8995 NS->setImplicit();
8996 VaListDecl->setDeclContext(NS);
8997 }
8998
8999 VaListDecl->startDefinition();
9000
9001 // void * __ap;
9002 FieldDecl *Field = FieldDecl::Create(C: const_cast<ASTContext &>(*Context),
9003 DC: VaListDecl,
9004 StartLoc: SourceLocation(),
9005 IdLoc: SourceLocation(),
9006 Id: &Context->Idents.get(Name: "__ap"),
9007 T: Context->getPointerType(Context->VoidTy),
9008 /*TInfo=*/nullptr,
9009 /*BitWidth=*/BW: nullptr,
9010 /*Mutable=*/false,
9011 InitStyle: ICIS_NoInit);
9012 Field->setAccess(AS_public);
9013 VaListDecl->addDecl(Field);
9014
9015 // };
9016 VaListDecl->completeDefinition();
9017 Context->VaListTagDecl = VaListDecl;
9018
9019 // typedef struct __va_list __builtin_va_list;
9020 QualType T = Context->getRecordType(Decl: VaListDecl);
9021 return Context->buildImplicitTypedef(T, Name: "__builtin_va_list");
9022}
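// For reference, a sketch of what the code above materializes. In C++ mode the
// record is placed in namespace std, as handled above:
//
//   namespace std { struct __va_list { void *__ap; }; }
//   typedef std::__va_list __builtin_va_list;
//
// and in C mode simply:
//
//   struct __va_list { void *__ap; };
//   typedef struct __va_list __builtin_va_list;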
9023
9024static TypedefDecl *
9025CreateSystemZBuiltinVaListDecl(const ASTContext *Context) {
9026 // struct __va_list_tag {
9027 RecordDecl *VaListTagDecl;
9028 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9029 VaListTagDecl->startDefinition();
9030
9031 const size_t NumFields = 4;
9032 QualType FieldTypes[NumFields];
9033 const char *FieldNames[NumFields];
9034
9035 // long __gpr;
9036 FieldTypes[0] = Context->LongTy;
9037 FieldNames[0] = "__gpr";
9038
9039 // long __fpr;
9040 FieldTypes[1] = Context->LongTy;
9041 FieldNames[1] = "__fpr";
9042
9043 // void *__overflow_arg_area;
9044 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
9045 FieldNames[2] = "__overflow_arg_area";
9046
9047 // void *__reg_save_area;
9048 FieldTypes[3] = Context->getPointerType(Context->VoidTy);
9049 FieldNames[3] = "__reg_save_area";
9050
9051 // Create fields
9052 for (unsigned i = 0; i < NumFields; ++i) {
9053 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context),
9054 VaListTagDecl,
9055 SourceLocation(),
9056 SourceLocation(),
9057 &Context->Idents.get(Name: FieldNames[i]),
9058 FieldTypes[i], /*TInfo=*/nullptr,
9059 /*BitWidth=*/nullptr,
9060 /*Mutable=*/false,
9061 ICIS_NoInit);
9062 Field->setAccess(AS_public);
9063 VaListTagDecl->addDecl(Field);
9064 }
9065 VaListTagDecl->completeDefinition();
9066 Context->VaListTagDecl = VaListTagDecl;
9067 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9068
9069 // };
9070
9071 // typedef __va_list_tag __builtin_va_list[1];
9072 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9073 QualType VaListTagArrayType = Context->getConstantArrayType(
9074 EltTy: VaListTagType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9075
9076 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9077}
9078
9079static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) {
9080 // typedef struct __va_list_tag {
9081 RecordDecl *VaListTagDecl;
9082 VaListTagDecl = Context->buildImplicitRecord(Name: "__va_list_tag");
9083 VaListTagDecl->startDefinition();
9084
9085 const size_t NumFields = 3;
9086 QualType FieldTypes[NumFields];
9087 const char *FieldNames[NumFields];
9088
9089 // void *CurrentSavedRegisterArea;
9090 FieldTypes[0] = Context->getPointerType(Context->VoidTy);
9091 FieldNames[0] = "__current_saved_reg_area_pointer";
9092
9093 // void *SavedRegAreaEnd;
9094 FieldTypes[1] = Context->getPointerType(Context->VoidTy);
9095 FieldNames[1] = "__saved_reg_area_end_pointer";
9096
9097 // void *OverflowArea;
9098 FieldTypes[2] = Context->getPointerType(Context->VoidTy);
9099 FieldNames[2] = "__overflow_area_pointer";
9100
9101 // Create fields
9102 for (unsigned i = 0; i < NumFields; ++i) {
9103 FieldDecl *Field = FieldDecl::Create(
9104 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(),
9105 SourceLocation(), &Context->Idents.get(Name: FieldNames[i]), FieldTypes[i],
9106 /*TInfo=*/nullptr,
9107 /*BitWidth=*/nullptr,
9108 /*Mutable=*/false, ICIS_NoInit);
9109 Field->setAccess(AS_public);
9110 VaListTagDecl->addDecl(Field);
9111 }
9112 VaListTagDecl->completeDefinition();
9113 Context->VaListTagDecl = VaListTagDecl;
9114 QualType VaListTagType = Context->getRecordType(Decl: VaListTagDecl);
9115
9116 // } __va_list_tag;
9117 TypedefDecl *VaListTagTypedefDecl =
9118 Context->buildImplicitTypedef(T: VaListTagType, Name: "__va_list_tag");
9119
9120 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl);
9121
9122 // typedef __va_list_tag __builtin_va_list[1];
9123 llvm::APInt Size(Context->getTypeSize(T: Context->getSizeType()), 1);
9124 QualType VaListTagArrayType = Context->getConstantArrayType(
9125 EltTy: VaListTagTypedefType, ArySizeIn: Size, SizeExpr: nullptr, ASM: ArraySizeModifier::Normal, IndexTypeQuals: 0);
9126
9127 return Context->buildImplicitTypedef(T: VaListTagArrayType, Name: "__builtin_va_list");
9128}
9129
9130static TypedefDecl *CreateVaListDecl(const ASTContext *Context,
9131 TargetInfo::BuiltinVaListKind Kind) {
9132 switch (Kind) {
9133 case TargetInfo::CharPtrBuiltinVaList:
9134 return CreateCharPtrBuiltinVaListDecl(Context);
9135 case TargetInfo::VoidPtrBuiltinVaList:
9136 return CreateVoidPtrBuiltinVaListDecl(Context);
9137 case TargetInfo::AArch64ABIBuiltinVaList:
9138 return CreateAArch64ABIBuiltinVaListDecl(Context);
9139 case TargetInfo::PowerABIBuiltinVaList:
9140 return CreatePowerABIBuiltinVaListDecl(Context);
9141 case TargetInfo::X86_64ABIBuiltinVaList:
9142 return CreateX86_64ABIBuiltinVaListDecl(Context);
9143 case TargetInfo::PNaClABIBuiltinVaList:
9144 return CreatePNaClABIBuiltinVaListDecl(Context);
9145 case TargetInfo::AAPCSABIBuiltinVaList:
9146 return CreateAAPCSABIBuiltinVaListDecl(Context);
9147 case TargetInfo::SystemZBuiltinVaList:
9148 return CreateSystemZBuiltinVaListDecl(Context);
9149 case TargetInfo::HexagonBuiltinVaList:
9150 return CreateHexagonBuiltinVaListDecl(Context);
9151 }
9152
9153 llvm_unreachable("Unhandled __builtin_va_list type kind");
9154}
9155
9156TypedefDecl *ASTContext::getBuiltinVaListDecl() const {
9157 if (!BuiltinVaListDecl) {
9158 BuiltinVaListDecl = CreateVaListDecl(Context: this, Kind: Target->getBuiltinVaListKind());
9159 assert(BuiltinVaListDecl->isImplicit());
9160 }
9161
9162 return BuiltinVaListDecl;
9163}
9164
9165Decl *ASTContext::getVaListTagDecl() const {
9166 // Force the creation of VaListTagDecl by building the __builtin_va_list
9167 // declaration.
9168 if (!VaListTagDecl)
9169 (void)getBuiltinVaListDecl();
9170
9171 return VaListTagDecl;
9172}
9173
9174TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const {
9175 if (!BuiltinMSVaListDecl)
9176 BuiltinMSVaListDecl = CreateMSVaListDecl(Context: this);
9177
9178 return BuiltinMSVaListDecl;
9179}
9180
9181bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const {
9182 // Allow redecl custom type checking builtin for HLSL.
9183 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin &&
9184 BuiltinInfo.hasCustomTypechecking(ID: FD->getBuiltinID()))
9185 return true;
9186 return BuiltinInfo.canBeRedeclared(ID: FD->getBuiltinID());
9187}
9188
9189void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
9190 assert(ObjCConstantStringType.isNull() &&
9191 "'NSConstantString' type already set!");
9192
9193 ObjCConstantStringType = getObjCInterfaceType(Decl);
9194}
9195
9196/// Retrieve the template name that corresponds to a non-empty
9197/// lookup.
9198TemplateName
9199ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
9200 UnresolvedSetIterator End) const {
9201 unsigned size = End - Begin;
9202 assert(size > 1 && "set is not overloaded!");
9203
9204 void *memory = Allocate(Size: sizeof(OverloadedTemplateStorage) +
9205 size * sizeof(FunctionTemplateDecl*));
9206 auto *OT = new (memory) OverloadedTemplateStorage(size);
9207
9208 NamedDecl **Storage = OT->getStorage();
9209 for (UnresolvedSetIterator I = Begin; I != End; ++I) {
9210 NamedDecl *D = *I;
9211 assert(isa<FunctionTemplateDecl>(D) ||
9212 isa<UnresolvedUsingValueDecl>(D) ||
9213 (isa<UsingShadowDecl>(D) &&
9214 isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
9215 *Storage++ = D;
9216 }
9217
9218 return TemplateName(OT);
9219}
9220
9221/// Retrieve a template name representing an unqualified-id that has been
9222/// assumed to name a template for ADL purposes.
9223TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
9224 auto *OT = new (*this) AssumedTemplateStorage(Name);
9225 return TemplateName(OT);
9226}
9227
9228/// Retrieve the template name that represents a qualified
9229/// template name such as \c std::vector.
9230TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
9231 bool TemplateKeyword,
9232 TemplateName Template) const {
9233 assert(NNS && "Missing nested-name-specifier in qualified template name");
9234
9235 // FIXME: Canonicalization?
9236 llvm::FoldingSetNodeID ID;
9237 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, TN: Template);
9238
9239 void *InsertPos = nullptr;
9240 QualifiedTemplateName *QTN =
9241 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
9242 if (!QTN) {
9243 QTN = new (*this, alignof(QualifiedTemplateName))
9244 QualifiedTemplateName(NNS, TemplateKeyword, Template);
9245 QualifiedTemplateNames.InsertNode(N: QTN, InsertPos);
9246 }
9247
9248 return TemplateName(QTN);
9249}
9250
9251/// Retrieve the template name that represents a dependent
9252/// template name such as \c MetaFun::template apply.
9253TemplateName
9254ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
9255 const IdentifierInfo *Name) const {
9256 assert((!NNS || NNS->isDependent()) &&
9257 "Nested name specifier must be dependent");
9258
9259 llvm::FoldingSetNodeID ID;
9260 DependentTemplateName::Profile(ID, NNS, Identifier: Name);
9261
9262 void *InsertPos = nullptr;
9263 DependentTemplateName *QTN =
9264 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
9265
9266 if (QTN)
9267 return TemplateName(QTN);
9268
9269 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
9270 if (CanonNNS == NNS) {
9271 QTN = new (*this, alignof(DependentTemplateName))
9272 DependentTemplateName(NNS, Name);
9273 } else {
9274 TemplateName Canon = getDependentTemplateName(NNS: CanonNNS, Name);
9275 QTN = new (*this, alignof(DependentTemplateName))
9276 DependentTemplateName(NNS, Name, Canon);
9277 DependentTemplateName *CheckQTN =
9278 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
9279 assert(!CheckQTN && "Dependent type name canonicalization broken");
9280 (void)CheckQTN;
9281 }
9282
9283 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
9284 return TemplateName(QTN);
9285}
9286
9287/// Retrieve the template name that represents a dependent
9288/// template name such as \c MetaFun::template operator+.
9289TemplateName
9290ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
9291 OverloadedOperatorKind Operator) const {
9292 assert((!NNS || NNS->isDependent()) &&
9293 "Nested name specifier must be dependent");
9294
9295 llvm::FoldingSetNodeID ID;
9296 DependentTemplateName::Profile(ID, NNS, Operator);
9297
9298 void *InsertPos = nullptr;
9299 DependentTemplateName *QTN
9300 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
9301
9302 if (QTN)
9303 return TemplateName(QTN);
9304
9305 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
9306 if (CanonNNS == NNS) {
9307 QTN = new (*this, alignof(DependentTemplateName))
9308 DependentTemplateName(NNS, Operator);
9309 } else {
9310 TemplateName Canon = getDependentTemplateName(NNS: CanonNNS, Operator);
9311 QTN = new (*this, alignof(DependentTemplateName))
9312 DependentTemplateName(NNS, Operator, Canon);
9313
9314 DependentTemplateName *CheckQTN
9315 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
9316 assert(!CheckQTN && "Dependent template name canonicalization broken");
9317 (void)CheckQTN;
9318 }
9319
9320 DependentTemplateNames.InsertNode(N: QTN, InsertPos);
9321 return TemplateName(QTN);
9322}
9323
9324TemplateName ASTContext::getSubstTemplateTemplateParm(
9325 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index,
9326 std::optional<unsigned> PackIndex) const {
9327 llvm::FoldingSetNodeID ID;
9328 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl,
9329 Index, PackIndex);
9330
9331 void *insertPos = nullptr;
9332 SubstTemplateTemplateParmStorage *subst
9333 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos&: insertPos);
9334
9335 if (!subst) {
9336 subst = new (*this) SubstTemplateTemplateParmStorage(
9337 Replacement, AssociatedDecl, Index, PackIndex);
9338 SubstTemplateTemplateParms.InsertNode(N: subst, InsertPos: insertPos);
9339 }
9340
9341 return TemplateName(subst);
9342}
9343
9344TemplateName
9345ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack,
9346 Decl *AssociatedDecl,
9347 unsigned Index, bool Final) const {
9348 auto &Self = const_cast<ASTContext &>(*this);
9349 llvm::FoldingSetNodeID ID;
9350 SubstTemplateTemplateParmPackStorage::Profile(ID, Context&: Self, ArgPack,
9351 AssociatedDecl, Index, Final);
9352
9353 void *InsertPos = nullptr;
9354 SubstTemplateTemplateParmPackStorage *Subst
9355 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
9356
9357 if (!Subst) {
9358 Subst = new (*this) SubstTemplateTemplateParmPackStorage(
9359 ArgPack.pack_elements(), AssociatedDecl, Index, Final);
9360 SubstTemplateTemplateParmPacks.InsertNode(N: Subst, InsertPos);
9361 }
9362
9363 return TemplateName(Subst);
9364}
9365
9366/// getFromTargetType - Given one of the integer types provided by
9367/// TargetInfo, produce the corresponding type. The unsigned @p Type
9368/// is actually a value of type @c TargetInfo::IntType.
9369CanQualType ASTContext::getFromTargetType(unsigned Type) const {
9370 switch (Type) {
9371 case TargetInfo::NoInt: return {};
9372 case TargetInfo::SignedChar: return SignedCharTy;
9373 case TargetInfo::UnsignedChar: return UnsignedCharTy;
9374 case TargetInfo::SignedShort: return ShortTy;
9375 case TargetInfo::UnsignedShort: return UnsignedShortTy;
9376 case TargetInfo::SignedInt: return IntTy;
9377 case TargetInfo::UnsignedInt: return UnsignedIntTy;
9378 case TargetInfo::SignedLong: return LongTy;
9379 case TargetInfo::UnsignedLong: return UnsignedLongTy;
9380 case TargetInfo::SignedLongLong: return LongLongTy;
9381 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
9382 }
9383
9384 llvm_unreachable("Unhandled TargetInfo::IntType value");
9385}
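// Typical usage (a sketch; 'Ctx' is a hypothetical ASTContext reference):
// convert a TargetInfo::IntType describing, say, the target's size_t into the
// corresponding AST type.
//
//   CanQualType SizeTy =
//       Ctx.getFromTargetType(Ctx.getTargetInfo().getSizeType());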
9386
9387//===----------------------------------------------------------------------===//
9388// Type Predicates.
9389//===----------------------------------------------------------------------===//
9390
9391 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, i.e. the
9392 /// Objective-C garbage collection attribute of the given type.
9393 ///
9394Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
9395 if (getLangOpts().getGC() == LangOptions::NonGC)
9396 return Qualifiers::GCNone;
9397
9398 assert(getLangOpts().ObjC);
9399 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
9400
9401 // The default behaviour under Objective-C's GC is for ObjC pointers
9402 // (or pointers to them) to be treated as though they were declared
9403 // as __strong.
9404 if (GCAttrs == Qualifiers::GCNone) {
9405 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
9406 return Qualifiers::Strong;
9407 else if (Ty->isPointerType())
9408 return getObjCGCAttrKind(Ty: Ty->castAs<PointerType>()->getPointeeType());
9409 } else {
9410 // It's not valid to set GC attributes on anything that isn't a
9411 // pointer.
9412#ifndef NDEBUG
9413 QualType CT = Ty->getCanonicalTypeInternal();
9414 while (const auto *AT = dyn_cast<ArrayType>(Val&: CT))
9415 CT = AT->getElementType();
9416 assert(CT->isAnyPointerType() || CT->isBlockPointerType());
9417#endif
9418 }
9419 return GCAttrs;
9420}
9421
9422//===----------------------------------------------------------------------===//
9423// Type Compatibility Testing
9424//===----------------------------------------------------------------------===//
9425
9426/// areCompatVectorTypes - Return true if the two specified vector types are
9427/// compatible.
9428static bool areCompatVectorTypes(const VectorType *LHS,
9429 const VectorType *RHS) {
9430 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
9431 return LHS->getElementType() == RHS->getElementType() &&
9432 LHS->getNumElements() == RHS->getNumElements();
9433}
9434
9435/// areCompatMatrixTypes - Return true if the two specified matrix types are
9436/// compatible.
9437static bool areCompatMatrixTypes(const ConstantMatrixType *LHS,
9438 const ConstantMatrixType *RHS) {
9439 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
9440 return LHS->getElementType() == RHS->getElementType() &&
9441 LHS->getNumRows() == RHS->getNumRows() &&
9442 LHS->getNumColumns() == RHS->getNumColumns();
9443}
9444
9445bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
9446 QualType SecondVec) {
9447 assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
9448 assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
9449
9450 if (hasSameUnqualifiedType(T1: FirstVec, T2: SecondVec))
9451 return true;
9452
9453 // Treat Neon vector types and most AltiVec vector types as if they are the
9454 // equivalent GCC vector types.
9455 const auto *First = FirstVec->castAs<VectorType>();
9456 const auto *Second = SecondVec->castAs<VectorType>();
9457 if (First->getNumElements() == Second->getNumElements() &&
9458 hasSameType(T1: First->getElementType(), T2: Second->getElementType()) &&
9459 First->getVectorKind() != VectorKind::AltiVecPixel &&
9460 First->getVectorKind() != VectorKind::AltiVecBool &&
9461 Second->getVectorKind() != VectorKind::AltiVecPixel &&
9462 Second->getVectorKind() != VectorKind::AltiVecBool &&
9463 First->getVectorKind() != VectorKind::SveFixedLengthData &&
9464 First->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
9465 Second->getVectorKind() != VectorKind::SveFixedLengthData &&
9466 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate &&
9467 First->getVectorKind() != VectorKind::RVVFixedLengthData &&
9468 Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
9469 First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
9470 Second->getVectorKind() != VectorKind::RVVFixedLengthMask)
9471 return true;
9472
9473 return false;
9474}
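// For illustration (a sketch using hypothetical typedefs): a generic GCC-style
// vector and a Neon vector with the same element type and element count, e.g.
//
//   typedef float gcc_float4 __attribute__((vector_size(16)));
//   typedef float neon_float4 __attribute__((neon_vector_type(4)));
//
// are accepted as compatible here, whereas the AltiVec pixel/bool kinds and the
// SVE/RVV fixed-length kinds are deliberately excluded above.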
9475
9476/// getSVETypeSize - Return SVE vector or predicate register size.
9477static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
9478 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type");
9479 if (Ty->getKind() == BuiltinType::SveBool ||
9480 Ty->getKind() == BuiltinType::SveCount)
9481 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth();
9482 return Context.getLangOpts().VScaleMin * 128;
9483}
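// Worked example (a sketch, assuming -msve-vector-bits=512 so that
// LangOpts.VScaleMin == 4): data vectors come out as 4 * 128 = 512 bits, while
// svbool_t and svcount_t come out as 512 / 8 = 64 bits, i.e. one bit per byte
// of a data vector.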
9484
9485bool ASTContext::areCompatibleSveTypes(QualType FirstType,
9486 QualType SecondType) {
9487 assert(
9488 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
9489 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
9490 "Expected SVE builtin type and vector type!");
9491
9492 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
9493 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
9494 if (const auto *VT = SecondType->getAs<VectorType>()) {
9495 // Predicates have the same representation as uint8 so we also have to
9496 // check the kind to make these types incompatible.
9497 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
9498 return BT->getKind() == BuiltinType::SveBool;
9499 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData)
9500 return VT->getElementType().getCanonicalType() ==
9501 FirstType->getSveEltType(Ctx: *this);
9502 else if (VT->getVectorKind() == VectorKind::Generic)
9503 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
9504 hasSameType(VT->getElementType(),
9505 getBuiltinVectorTypeInfo(BT).ElementType);
9506 }
9507 }
9508 return false;
9509 };
9510
9511 return IsValidCast(FirstType, SecondType) ||
9512 IsValidCast(SecondType, FirstType);
9513}
9514
9515bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
9516 QualType SecondType) {
9517 assert(
9518 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) ||
9519 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) &&
9520 "Expected SVE builtin type and vector type!");
9521
9522 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
9523 const auto *BT = FirstType->getAs<BuiltinType>();
9524 if (!BT)
9525 return false;
9526
9527 const auto *VecTy = SecondType->getAs<VectorType>();
9528 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData ||
9529 VecTy->getVectorKind() == VectorKind::Generic)) {
9530 const LangOptions::LaxVectorConversionKind LVCKind =
9531 getLangOpts().getLaxVectorConversions();
9532
9533 // Cannot convert between SVE predicates and SVE vectors because of
9534 // their different sizes.
9535 if (BT->getKind() == BuiltinType::SveBool &&
9536 VecTy->getVectorKind() == VectorKind::SveFixedLengthData)
9537 return false;
9538
9539 // If __ARM_FEATURE_SVE_BITS != N, do not allow GNU vector lax conversion.
9540 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
9541 // converts to VLAT and VLAT implicitly converts to GNUT."
9542 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
9543 // predicates.
9544 if (VecTy->getVectorKind() == VectorKind::Generic &&
9545 getTypeSize(T: SecondType) != getSVETypeSize(Context&: *this, Ty: BT))
9546 return false;
9547
9548 // If -flax-vector-conversions=all is specified, the types are
9549 // certainly compatible.
9550 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
9551 return true;
9552
9553 // If -flax-vector-conversions=integer is specified, the types are
9554 // compatible if the elements are integer types.
9555 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
9556 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
9557 FirstType->getSveEltType(Ctx: *this)->isIntegerType();
9558 }
9559
9560 return false;
9561 };
9562
9563 return IsLaxCompatible(FirstType, SecondType) ||
9564 IsLaxCompatible(SecondType, FirstType);
9565}
9566
9567/// getRVVTypeSize - Return RVV vector register size.
9568static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) {
9569 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type");
9570 auto VScale = Context.getTargetInfo().getVScaleRange(LangOpts: Context.getLangOpts());
9571 if (!VScale)
9572 return 0;
9573
9574 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty);
9575
9576 unsigned EltSize = Context.getTypeSize(Info.ElementType);
9577 if (Info.ElementType == Context.BoolTy)
9578 EltSize = 1;
9579
9580 unsigned MinElts = Info.EC.getKnownMinValue();
9581 return VScale->first * MinElts * EltSize;
9582}
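// Worked example (a sketch, plugging numbers into the formula above): with a
// known minimum vscale of 2, a builtin type with MinElts == 2 and 32-bit
// elements yields 2 * 2 * 32 = 128 bits; boolean element types are counted as
// one bit each.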
9583
9584bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
9585 QualType SecondType) {
9586 assert(
9587 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
9588 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
9589 "Expected RVV builtin type and vector type!");
9590
9591 auto IsValidCast = [this](QualType FirstType, QualType SecondType) {
9592 if (const auto *BT = FirstType->getAs<BuiltinType>()) {
9593 if (const auto *VT = SecondType->getAs<VectorType>()) {
9594 if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
9595 BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(Ty: BT);
9596 return FirstType->isRVVVLSBuiltinType() &&
9597 Info.ElementType == BoolTy &&
9598 getTypeSize(SecondType) == getRVVTypeSize(*this, BT);
9599 }
9600 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
9601 VT->getVectorKind() == VectorKind::Generic)
9602 return FirstType->isRVVVLSBuiltinType() &&
9603 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) &&
9604 hasSameType(VT->getElementType(),
9605 getBuiltinVectorTypeInfo(BT).ElementType);
9606 }
9607 }
9608 return false;
9609 };
9610
9611 return IsValidCast(FirstType, SecondType) ||
9612 IsValidCast(SecondType, FirstType);
9613}
9614
9615bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType,
9616 QualType SecondType) {
9617 assert(
9618 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) ||
9619 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) &&
9620 "Expected RVV builtin type and vector type!");
9621
9622 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
9623 const auto *BT = FirstType->getAs<BuiltinType>();
9624 if (!BT)
9625 return false;
9626
9627 if (!BT->isRVVVLSBuiltinType())
9628 return false;
9629
9630 const auto *VecTy = SecondType->getAs<VectorType>();
9631 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) {
9632 const LangOptions::LaxVectorConversionKind LVCKind =
9633 getLangOpts().getLaxVectorConversions();
9634
9635 // If __riscv_v_fixed_vlen != N, do not allow vector lax conversion.
9636 if (getTypeSize(T: SecondType) != getRVVTypeSize(Context&: *this, Ty: BT))
9637 return false;
9638
9639 // If -flax-vector-conversions=all is specified, the types are
9640 // certainly compatible.
9641 if (LVCKind == LangOptions::LaxVectorConversionKind::All)
9642 return true;
9643
9644 // If -flax-vector-conversions=integer is specified, the types are
9645 // compatible if the elements are integer types.
9646 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer)
9647 return VecTy->getElementType().getCanonicalType()->isIntegerType() &&
9648 FirstType->getRVVEltType(Ctx: *this)->isIntegerType();
9649 }
9650
9651 return false;
9652 };
9653
9654 return IsLaxCompatible(FirstType, SecondType) ||
9655 IsLaxCompatible(SecondType, FirstType);
9656}
9657
9658bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const {
9659 while (true) {
9660 // __strong id
9661 if (const AttributedType *Attr = dyn_cast<AttributedType>(Val&: Ty)) {
9662 if (Attr->getAttrKind() == attr::ObjCOwnership)
9663 return true;
9664
9665 Ty = Attr->getModifiedType();
9666
9667 // X *__strong (...)
9668 } else if (const ParenType *Paren = dyn_cast<ParenType>(Val&: Ty)) {
9669 Ty = Paren->getInnerType();
9670
9671 // We do not want to look through typedefs, typeof(expr),
9672 // typeof(type), or any other way that the type is somehow
9673 // abstracted.
9674 } else {
9675 return false;
9676 }
9677 }
9678}
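// Illustration (a sketch): the qualifier counts as "direct" when it is written
// on the type itself, e.g. '__strong id x;' (an AttributedType carrying
// ObjCOwnership), or is merely wrapped in parentheses, but not when it is
// hidden behind a typedef, typeof(expr) or typeof(type), which this function
// deliberately refuses to look through.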
9679
9680//===----------------------------------------------------------------------===//
9681// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
9682//===----------------------------------------------------------------------===//
9683
9684/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
9685/// inheritance hierarchy of 'rProto'.
9686bool
9687ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
9688 ObjCProtocolDecl *rProto) const {
9689 if (declaresSameEntity(lProto, rProto))
9690 return true;
9691 for (auto *PI : rProto->protocols())
9692 if (ProtocolCompatibleWithProtocol(lProto, rProto: PI))
9693 return true;
9694 return false;
9695}
9696
9697/// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and
9698/// Class<pr1, ...>.
9699bool ASTContext::ObjCQualifiedClassTypesAreCompatible(
9700 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) {
9701 for (auto *lhsProto : lhs->quals()) {
9702 bool match = false;
9703 for (auto *rhsProto : rhs->quals()) {
9704 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
9705 match = true;
9706 break;
9707 }
9708 }
9709 if (!match)
9710 return false;
9711 }
9712 return true;
9713}
9714
9715/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
9716/// ObjCQualifiedIDType.
9717bool ASTContext::ObjCQualifiedIdTypesAreCompatible(
9718 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs,
9719 bool compare) {
9720 // Allow id<P..> and an 'id' in all cases.
9721 if (lhs->isObjCIdType() || rhs->isObjCIdType())
9722 return true;
9723
9724 // Don't allow id<P..> to convert to Class or Class<P..> in either direction.
9725 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() ||
9726 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType())
9727 return false;
9728
9729 if (lhs->isObjCQualifiedIdType()) {
9730 if (rhs->qual_empty()) {
9731 // If the RHS is an unqualified interface pointer "NSString*",
9732 // make sure we check the class hierarchy.
9733 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
9734 for (auto *I : lhs->quals()) {
9735 // When comparing an id<P> on the lhs with a static type on the rhs,
9736 // see if the static class implements all of id's protocols, directly or
9737 // through its superclass and categories.
9738 if (!rhsID->ClassImplementsProtocol(I, true))
9739 return false;
9740 }
9741 }
9742 // If there are no qualifiers and no interface, we have an 'id'.
9743 return true;
9744 }
9745 // Both the right and left sides have qualifiers.
9746 for (auto *lhsProto : lhs->quals()) {
9747 bool match = false;
9748
9749 // When comparing an id<P> on the lhs with a static type on the rhs,
9750 // see if the static class implements all of id's protocols, directly or
9751 // through its superclass and categories.
9752 for (auto *rhsProto : rhs->quals()) {
9753 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
9754 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
9755 match = true;
9756 break;
9757 }
9758 }
9759 // If the RHS is a qualified interface pointer "NSString<P>*",
9760 // make sure we check the class hierarchy.
9761 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) {
9762 for (auto *I : lhs->quals()) {
9763 // When comparing an id<P> on the lhs with a static type on the rhs,
9764 // see if the static class implements all of id's protocols, directly or
9765 // through its superclass and categories.
9766 if (rhsID->ClassImplementsProtocol(I, true)) {
9767 match = true;
9768 break;
9769 }
9770 }
9771 }
9772 if (!match)
9773 return false;
9774 }
9775
9776 return true;
9777 }
9778
9779 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>");
9780
9781 if (lhs->getInterfaceType()) {
9782 // Both the right and left sides have qualifiers.
9783 for (auto *lhsProto : lhs->quals()) {
9784 bool match = false;
9785
9786 // When comparing an id<P> on the rhs with a static type on the lhs,
9787 // see if the static class implements all of id's protocols, directly or
9788 // through its superclass and categories.
9789 // First, each lhs protocol in the qualifier list must be found, directly
9790 // or indirectly, in the rhs's qualifier list, or it is a mismatch.
9791 for (auto *rhsProto : rhs->quals()) {
9792 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
9793 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
9794 match = true;
9795 break;
9796 }
9797 }
9798 if (!match)
9799 return false;
9800 }
9801
9802 // The static class's protocols, or its superclass or category protocols, must
9803 // be found, directly or indirectly, in the rhs's qualifier list, or it is a mismatch.
9804 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) {
9805 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
9806 CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
9807 // This is rather dubious but matches gcc's behavior. If the lhs has
9808 // no type qualifiers and its class has no static protocol(s),
9809 // assume that it is a mismatch.
9810 if (LHSInheritedProtocols.empty() && lhs->qual_empty())
9811 return false;
9812 for (auto *lhsProto : LHSInheritedProtocols) {
9813 bool match = false;
9814 for (auto *rhsProto : rhs->quals()) {
9815 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
9816 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
9817 match = true;
9818 break;
9819 }
9820 }
9821 if (!match)
9822 return false;
9823 }
9824 }
9825 return true;
9826 }
9827 return false;
9828}
9829
9830/// canAssignObjCInterfaces - Return true if the two interface types are
9831/// compatible for assignment from RHS to LHS. This handles validation of any
9832/// protocol qualifiers on the LHS or RHS.
9833bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
9834 const ObjCObjectPointerType *RHSOPT) {
9835 const ObjCObjectType* LHS = LHSOPT->getObjectType();
9836 const ObjCObjectType* RHS = RHSOPT->getObjectType();
9837
9838 // If either type represents the built-in 'id' type, return true.
9839 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId())
9840 return true;
9841
9842 // Function object that propagates a successful result or handles
9843 // __kindof types.
9844 auto finish = [&](bool succeeded) -> bool {
9845 if (succeeded)
9846 return true;
9847
9848 if (!RHS->isKindOfType())
9849 return false;
9850
9851 // Strip off __kindof and protocol qualifiers, then check whether
9852 // we can assign the other way.
9853 return canAssignObjCInterfaces(LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
9854 RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this));
9855 };
9856
9857 // Casts from or to id<P> are allowed when the other side has compatible
9858 // protocols.
9859 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
9860 return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false));
9861 }
9862
9863 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
9864 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
9865 return finish(ObjCQualifiedClassTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT));
9866 }
9867
9868 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
9869 if (LHS->isObjCClass() && RHS->isObjCClass()) {
9870 return true;
9871 }
9872
9873 // If we have 2 user-defined types, fall into that path.
9874 if (LHS->getInterface() && RHS->getInterface()) {
9875 return finish(canAssignObjCInterfaces(LHS, RHS));
9876 }
9877
9878 return false;
9879}
9880
9881 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
9882 /// to provide type safety for Objective-C pointers used to pass/return
9883 /// arguments in block literals. When passed as arguments, passing 'A *' where
9884 /// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
9885 /// not OK. For the return type, the opposite is not OK.
9886bool ASTContext::canAssignObjCInterfacesInBlockPointer(
9887 const ObjCObjectPointerType *LHSOPT,
9888 const ObjCObjectPointerType *RHSOPT,
9889 bool BlockReturnType) {
9890
9891 // Function object that propagates a successful result or handles
9892 // __kindof types.
9893 auto finish = [&](bool succeeded) -> bool {
9894 if (succeeded)
9895 return true;
9896
9897 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
9898 if (!Expected->isKindOfType())
9899 return false;
9900
9901 // Strip off __kindof and protocol qualifiers, then check whether
9902 // we can assign the other way.
9903 return canAssignObjCInterfacesInBlockPointer(
9904 LHSOPT: RHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
9905 RHSOPT: LHSOPT->stripObjCKindOfTypeAndQuals(ctx: *this),
9906 BlockReturnType);
9907 };
9908
9909 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
9910 return true;
9911
9912 if (LHSOPT->isObjCBuiltinType()) {
9913 return finish(RHSOPT->isObjCBuiltinType() ||
9914 RHSOPT->isObjCQualifiedIdType());
9915 }
9916
9917 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
9918 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
9919 // For block parameters, use the previous type checking for compatibility.
9920 return finish(ObjCQualifiedIdTypesAreCompatible(lhs: LHSOPT, rhs: RHSOPT, compare: false) ||
9921 // Or corrected type checking as in non-compat mode.
9922 (!BlockReturnType &&
9923 ObjCQualifiedIdTypesAreCompatible(lhs: RHSOPT, rhs: LHSOPT, compare: false)));
9924 else
9925 return finish(ObjCQualifiedIdTypesAreCompatible(
9926 lhs: (BlockReturnType ? LHSOPT : RHSOPT),
9927 rhs: (BlockReturnType ? RHSOPT : LHSOPT), compare: false));
9928 }
9929
9930 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
9931 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
9932 if (LHS && RHS) { // We have 2 user-defined types.
9933 if (LHS != RHS) {
9934 if (LHS->getDecl()->isSuperClassOf(I: RHS->getDecl()))
9935 return finish(BlockReturnType);
9936 if (RHS->getDecl()->isSuperClassOf(I: LHS->getDecl()))
9937 return finish(!BlockReturnType);
9938 }
9939 else
9940 return true;
9941 }
9942 return false;
9943}
9944
9945/// Comparison routine for Objective-C protocols to be used with
9946/// llvm::array_pod_sort.
9947static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs,
9948 ObjCProtocolDecl * const *rhs) {
9949 return (*lhs)->getName().compare((*rhs)->getName());
9950}
9951
9952 /// getIntersectionOfProtocols - This routine finds the intersection of the
9953 /// sets of protocols inherited from two distinct Objective-C pointer objects
9954 /// with the given common base.
9955 /// It is used to build the composite qualifier list of the composite type of
9956 /// a conditional expression involving two Objective-C pointer objects.
9957static
9958void getIntersectionOfProtocols(ASTContext &Context,
9959 const ObjCInterfaceDecl *CommonBase,
9960 const ObjCObjectPointerType *LHSOPT,
9961 const ObjCObjectPointerType *RHSOPT,
9962 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) {
9963
9964 const ObjCObjectType* LHS = LHSOPT->getObjectType();
9965 const ObjCObjectType* RHS = RHSOPT->getObjectType();
9966 assert(LHS->getInterface() && "LHS must have an interface base");
9967 assert(RHS->getInterface() && "RHS must have an interface base");
9968
9969 // Add all of the protocols for the LHS.
9970 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet;
9971
9972 // Start with the protocol qualifiers.
9973 for (auto *proto : LHS->quals()) {
9974 Context.CollectInheritedProtocols(proto, LHSProtocolSet);
9975 }
9976
9977 // Also add the protocols associated with the LHS interface.
9978 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet);
9979
9980 // Add all of the protocols for the RHS.
9981 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet;
9982
9983 // Start with the protocol qualifiers.
9984 for (auto *proto : RHS->quals()) {
9985 Context.CollectInheritedProtocols(proto, RHSProtocolSet);
9986 }
9987
9988 // Also add the protocols associated with the RHS interface.
9989 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet);
9990
9991 // Compute the intersection of the collected protocol sets.
9992 for (auto *proto : LHSProtocolSet) {
9993 if (RHSProtocolSet.count(Ptr: proto))
9994 IntersectionSet.push_back(Elt: proto);
9995 }
9996
9997 // Compute the set of protocols that is implied by either the common type or
9998 // the protocols within the intersection.
9999 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols;
10000 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols);
10001
10002 // Remove any implied protocols from the list of inherited protocols.
10003 if (!ImpliedProtocols.empty()) {
10004 llvm::erase_if(C&: IntersectionSet, P: [&](ObjCProtocolDecl *proto) -> bool {
10005 return ImpliedProtocols.contains(Ptr: proto);
10006 });
10007 }
10008
10009 // Sort the remaining protocols by name.
10010 llvm::array_pod_sort(Start: IntersectionSet.begin(), End: IntersectionSet.end(),
10011 Compare: compareObjCProtocolsByName);
10012}
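// Illustrative sketch (hypothetical interface A and protocols P1-P3): for the
// composite type of
//
//   cond ? (A<P1, P2> *)x : (A<P2, P3> *)y
//
// the intersection of the two inherited protocol sets is {P2}; protocols
// already implied by the common base A are then dropped, and the survivors are
// sorted by name before being attached to the composite type.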
10013
10014/// Determine whether the first type is a subtype of the second.
10015static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs,
10016 QualType rhs) {
10017 // Common case: two object pointers.
10018 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>();
10019 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
10020 if (lhsOPT && rhsOPT)
10021 return ctx.canAssignObjCInterfaces(LHSOPT: lhsOPT, RHSOPT: rhsOPT);
10022
10023 // Two block pointers.
10024 const auto *lhsBlock = lhs->getAs<BlockPointerType>();
10025 const auto *rhsBlock = rhs->getAs<BlockPointerType>();
10026 if (lhsBlock && rhsBlock)
10027 return ctx.typesAreBlockPointerCompatible(lhs, rhs);
10028
10029 // If either is an unqualified 'id' and the other is a block, it's
10030 // acceptable.
10031 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) ||
10032 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock))
10033 return true;
10034
10035 return false;
10036}
10037
10038// Check that the given Objective-C type argument lists are equivalent.
10039static bool sameObjCTypeArgs(ASTContext &ctx,
10040 const ObjCInterfaceDecl *iface,
10041 ArrayRef<QualType> lhsArgs,
10042 ArrayRef<QualType> rhsArgs,
10043 bool stripKindOf) {
10044 if (lhsArgs.size() != rhsArgs.size())
10045 return false;
10046
10047 ObjCTypeParamList *typeParams = iface->getTypeParamList();
10048 if (!typeParams)
10049 return false;
10050
10051 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) {
10052 if (ctx.hasSameType(T1: lhsArgs[i], T2: rhsArgs[i]))
10053 continue;
10054
10055 switch (typeParams->begin()[i]->getVariance()) {
10056 case ObjCTypeParamVariance::Invariant:
10057 if (!stripKindOf ||
10058 !ctx.hasSameType(T1: lhsArgs[i].stripObjCKindOfType(ctx),
10059 T2: rhsArgs[i].stripObjCKindOfType(ctx))) {
10060 return false;
10061 }
10062 break;
10063
10064 case ObjCTypeParamVariance::Covariant:
10065 if (!canAssignObjCObjectTypes(ctx, lhs: lhsArgs[i], rhs: rhsArgs[i]))
10066 return false;
10067 break;
10068
10069 case ObjCTypeParamVariance::Contravariant:
10070 if (!canAssignObjCObjectTypes(ctx, lhs: rhsArgs[i], rhs: lhsArgs[i]))
10071 return false;
10072 break;
10073 }
10074 }
10075
10076 return true;
10077}
10078
10079QualType ASTContext::areCommonBaseCompatible(
10080 const ObjCObjectPointerType *Lptr,
10081 const ObjCObjectPointerType *Rptr) {
10082 const ObjCObjectType *LHS = Lptr->getObjectType();
10083 const ObjCObjectType *RHS = Rptr->getObjectType();
10084 const ObjCInterfaceDecl* LDecl = LHS->getInterface();
10085 const ObjCInterfaceDecl* RDecl = RHS->getInterface();
10086
10087 if (!LDecl || !RDecl)
10088 return {};
10089
10090 // When either LHS or RHS is a kindof type, we should return a kindof type.
10091 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return
10092 // kindof(A).
10093 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType();
10094
10095 // Follow the left-hand side up the class hierarchy until we either hit a
10096 // root or find the RHS. Record the ancestors in case we don't find it.
10097 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4>
10098 LHSAncestors;
10099 while (true) {
10100 // Record this ancestor. We'll need this if the common type isn't in the
10101 // path from the LHS to the root.
10102 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS;
10103
10104 if (declaresSameEntity(LHS->getInterface(), RDecl)) {
10105 // Get the type arguments.
10106 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten();
10107 bool anyChanges = false;
10108 if (LHS->isSpecialized() && RHS->isSpecialized()) {
10109 // Both have type arguments, compare them.
10110 if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
10111 lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
10112 /*stripKindOf=*/true))
10113 return {};
10114 } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
10115 // If only one has type arguments, the result will not have type
10116 // arguments.
10117 LHSTypeArgs = {};
10118 anyChanges = true;
10119 }
10120
10121 // Compute the intersection of protocols.
10122 SmallVector<ObjCProtocolDecl *, 8> Protocols;
10123 getIntersectionOfProtocols(Context&: *this, CommonBase: LHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
10124 IntersectionSet&: Protocols);
10125 if (!Protocols.empty())
10126 anyChanges = true;
10127
10128 // If anything in the LHS will have changed, build a new result type.
10129 // If we need to return a kindof type but LHS is not a kindof type, we
10130 // build a new result type.
10131 if (anyChanges || LHS->isKindOfType() != anyKindOf) {
10132 QualType Result = getObjCInterfaceType(Decl: LHS->getInterface());
10133 Result = getObjCObjectType(baseType: Result, typeArgs: LHSTypeArgs, protocols: Protocols,
10134 isKindOf: anyKindOf || LHS->isKindOfType());
10135 return getObjCObjectPointerType(ObjectT: Result);
10136 }
10137
10138 return getObjCObjectPointerType(ObjectT: QualType(LHS, 0));
10139 }
10140
10141 // Find the superclass.
10142 QualType LHSSuperType = LHS->getSuperClassType();
10143 if (LHSSuperType.isNull())
10144 break;
10145
10146 LHS = LHSSuperType->castAs<ObjCObjectType>();
10147 }
10148
10149 // We didn't find anything by following the LHS to its root; now check
10150 // the RHS against the cached set of ancestors.
10151 while (true) {
10152 auto KnownLHS = LHSAncestors.find(Val: RHS->getInterface()->getCanonicalDecl());
10153 if (KnownLHS != LHSAncestors.end()) {
10154 LHS = KnownLHS->second;
10155
10156 // Get the type arguments.
10157 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten();
10158 bool anyChanges = false;
10159 if (LHS->isSpecialized() && RHS->isSpecialized()) {
10160 // Both have type arguments, compare them.
10161 if (!sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
10162 lhsArgs: LHS->getTypeArgs(), rhsArgs: RHS->getTypeArgs(),
10163 /*stripKindOf=*/true))
10164 return {};
10165 } else if (LHS->isSpecialized() != RHS->isSpecialized()) {
10166 // If only one has type arguments, the result will not have type
10167 // arguments.
10168 RHSTypeArgs = {};
10169 anyChanges = true;
10170 }
10171
10172 // Compute the intersection of protocols.
10173 SmallVector<ObjCProtocolDecl *, 8> Protocols;
10174 getIntersectionOfProtocols(Context&: *this, CommonBase: RHS->getInterface(), LHSOPT: Lptr, RHSOPT: Rptr,
10175 IntersectionSet&: Protocols);
10176 if (!Protocols.empty())
10177 anyChanges = true;
10178
10179 // If we need to return a kindof type but RHS is not a kindof type, we
10180 // build a new result type.
10181 if (anyChanges || RHS->isKindOfType() != anyKindOf) {
10182 QualType Result = getObjCInterfaceType(Decl: RHS->getInterface());
10183 Result = getObjCObjectType(baseType: Result, typeArgs: RHSTypeArgs, protocols: Protocols,
10184 isKindOf: anyKindOf || RHS->isKindOfType());
10185 return getObjCObjectPointerType(ObjectT: Result);
10186 }
10187
10188 return getObjCObjectPointerType(ObjectT: QualType(RHS, 0));
10189 }
10190
10191 // Find the superclass of the RHS.
10192 QualType RHSSuperType = RHS->getSuperClassType();
10193 if (RHSSuperType.isNull())
10194 break;
10195
10196 RHS = RHSSuperType->castAs<ObjCObjectType>();
10197 }
10198
10199 return {};
10200}
10201
10202bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
10203 const ObjCObjectType *RHS) {
10204 assert(LHS->getInterface() && "LHS is not an interface type");
10205 assert(RHS->getInterface() && "RHS is not an interface type");
10206
10207 // Verify that the base decls are compatible: the RHS must be a subclass of
10208 // the LHS.
10209 ObjCInterfaceDecl *LHSInterface = LHS->getInterface();
10210 bool IsSuperClass = LHSInterface->isSuperClassOf(I: RHS->getInterface());
10211 if (!IsSuperClass)
10212 return false;
10213
10214 // If the LHS has protocol qualifiers, determine whether all of them are
10215 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the
10216 // LHS).
10217 if (LHS->getNumProtocols() > 0) {
10218 // It is OK if conversion of LHS to SuperClass results in narrowing of
10219 // types; i.e., SuperClass may implement at least one of the protocols
10220 // in LHS's protocol list. For example, SuperObj<P1> = lhs<P1,P2> is OK,
10221 // but SuperObj<P1,P2,P3> = lhs<P1,P2> is not.
10222 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10223 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10224 // Also, if the RHS has explicit qualifiers, include them when comparing
10225 // with the LHS's qualifiers.
10226 for (auto *RHSPI : RHS->quals())
10227 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10228 // If there are no protocols associated with the RHS, it is not a match.
10229 if (SuperClassInheritedProtocols.empty())
10230 return false;
10231
10232 for (const auto *LHSProto : LHS->quals()) {
10233 bool SuperImplementsProtocol = false;
10234 for (auto *SuperClassProto : SuperClassInheritedProtocols)
10235 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10236 SuperImplementsProtocol = true;
10237 break;
10238 }
10239 if (!SuperImplementsProtocol)
10240 return false;
10241 }
10242 }
10243
10244 // If the LHS is specialized, we may need to check type arguments.
10245 if (LHS->isSpecialized()) {
10246 // Follow the superclass chain until we've matched the LHS class in the
10247 // hierarchy. This substitutes type arguments through.
10248 const ObjCObjectType *RHSSuper = RHS;
10249 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10250 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10251
10252 // If the RHS is specialized, compare type arguments.
10253 if (RHSSuper->isSpecialized() &&
10254 !sameObjCTypeArgs(ctx&: *this, iface: LHS->getInterface(),
10255 lhsArgs: LHS->getTypeArgs(), rhsArgs: RHSSuper->getTypeArgs(),
10256 /*stripKindOf=*/true)) {
10257 return false;
10258 }
10259 }
10260
10261 return true;
10262}
10263
10264bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10265 // Get the "pointed to" types.
10266 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10267 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10268
10269 if (!LHSOPT || !RHSOPT)
10270 return false;
10271
10272 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10273 canAssignObjCInterfaces(LHSOPT: RHSOPT, RHSOPT: LHSOPT);
10274}
10275
10276bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10277 return canAssignObjCInterfaces(
10278 LHSOPT: getObjCObjectPointerType(ObjectT: To)->castAs<ObjCObjectPointerType>(),
10279 RHSOPT: getObjCObjectPointerType(ObjectT: From)->castAs<ObjCObjectPointerType>());
10280}
10281
10282/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10283/// both shall have the identically qualified version of a compatible type.
10284/// C99 6.2.7p1: Two types have compatible types if their types are the
10285/// same. See 6.7.[2,3,5] for additional rules.
10286bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
10287 bool CompareUnqualified) {
10288 if (getLangOpts().CPlusPlus)
10289 return hasSameType(T1: LHS, T2: RHS);
10290
10291 return !mergeTypes(LHS, RHS, OfBlockPointer: false, Unqualified: CompareUnqualified).isNull();
10292}
10293
10294bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
10295 return typesAreCompatible(LHS, RHS);
10296}
10297
10298bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
10299 return !mergeTypes(LHS, RHS, OfBlockPointer: true).isNull();
10300}
10301
10302 /// mergeTransparentUnionType - If T is a transparent union type and a member
10303 /// of T is compatible with SubType, return the merged type; otherwise return
10304 /// QualType().
10305QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
10306 bool OfBlockPointer,
10307 bool Unqualified) {
10308 if (const RecordType *UT = T->getAsUnionType()) {
10309 RecordDecl *UD = UT->getDecl();
10310 if (UD->hasAttr<TransparentUnionAttr>()) {
10311 for (const auto *I : UD->fields()) {
10312 QualType ET = I->getType().getUnqualifiedType();
10313 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
10314 if (!MT.isNull())
10315 return MT;
10316 }
10317 }
10318 }
10319
10320 return {};
10321}
10322
10323/// mergeFunctionParameterTypes - merge two types which appear as function
10324 /// parameter types.
10325QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs,
10326 bool OfBlockPointer,
10327 bool Unqualified) {
10328 // GNU extension: two types are compatible if they appear as function
10329 // parameter types, one of the types is a transparent union type, and the
10330 // other type is compatible with one of the union's members.
10331 QualType lmerge = mergeTransparentUnionType(T: lhs, SubType: rhs, OfBlockPointer,
10332 Unqualified);
10333 if (!lmerge.isNull())
10334 return lmerge;
10335
10336 QualType rmerge = mergeTransparentUnionType(T: rhs, SubType: lhs, OfBlockPointer,
10337 Unqualified);
10338 if (!rmerge.isNull())
10339 return rmerge;
10340
10341 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
10342}
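// Sketch of the GNU transparent_union extension handled above (hypothetical
// types):
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } UnionArg __attribute__((transparent_union));
//   void f(UnionArg);
//
// Here a redeclaration of 'f' taking 'int *' is treated as compatible, because
// 'int *' merges with one of the union's members.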
10343
10344QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
10345 bool OfBlockPointer, bool Unqualified,
10346 bool AllowCXX,
10347 bool IsConditionalOperator) {
10348 const auto *lbase = lhs->castAs<FunctionType>();
10349 const auto *rbase = rhs->castAs<FunctionType>();
10350 const auto *lproto = dyn_cast<FunctionProtoType>(Val: lbase);
10351 const auto *rproto = dyn_cast<FunctionProtoType>(Val: rbase);
10352 bool allLTypes = true;
10353 bool allRTypes = true;
10354
10355 // Check return type
10356 QualType retType;
10357 if (OfBlockPointer) {
10358 QualType RHS = rbase->getReturnType();
10359 QualType LHS = lbase->getReturnType();
10360 bool UnqualifiedResult = Unqualified;
10361 if (!UnqualifiedResult)
10362 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
10363 retType = mergeTypes(LHS, RHS, OfBlockPointer: true, Unqualified: UnqualifiedResult, BlockReturnType: true);
10364 }
10365 else
10366 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), OfBlockPointer: false,
10367 Unqualified);
10368 if (retType.isNull())
10369 return {};
10370
10371 if (Unqualified)
10372 retType = retType.getUnqualifiedType();
10373
10374 CanQualType LRetType = getCanonicalType(T: lbase->getReturnType());
10375 CanQualType RRetType = getCanonicalType(T: rbase->getReturnType());
10376 if (Unqualified) {
10377 LRetType = LRetType.getUnqualifiedType();
10378 RRetType = RRetType.getUnqualifiedType();
10379 }
10380
10381 if (getCanonicalType(T: retType) != LRetType)
10382 allLTypes = false;
10383 if (getCanonicalType(T: retType) != RRetType)
10384 allRTypes = false;
10385
10386 // FIXME: double check this
10387 // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
10388 // rbase->getRegParmAttr() != 0 &&
10389 // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
10390 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
10391 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
10392
10393 // Compatible functions must have compatible calling conventions
10394 if (lbaseInfo.getCC() != rbaseInfo.getCC())
10395 return {};
10396
10397 // Regparm is part of the calling convention.
10398 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
10399 return {};
10400 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
10401 return {};
10402
10403 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
10404 return {};
10405 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs())
10406 return {};
10407 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck())
10408 return {};
10409
10410 // When merging declarations, it's common for supplemental information like
10411 // attributes to only be present in one of the declarations, and we generally
10412 // want type merging to preserve the union of information. So a merged
10413 // function type should be noreturn if it was noreturn in *either* operand
10414 // type.
10415 //
10416 // But for the conditional operator, this is backwards. The result of the
10417 // operator could be either operand, and its type should conservatively
10418 // reflect that. So a function type in a composite type is noreturn only
10419 // if it's noreturn in *both* operand types.
10420 //
10421 // Arguably, noreturn is a kind of subtype, and the conditional operator
10422 // ought to produce the most specific common supertype of its operand types.
10423 // That would differ from this rule in contravariant positions. However,
10424 // neither C nor C++ generally uses this kind of subtype reasoning. Also,
10425 // as a practical matter, it would only affect C code that does abstraction of
10426 // higher-order functions (taking noreturn callbacks!), which is uncommon to
10427 // say the least. So we use the simpler rule.
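  //
  // For example (an illustrative sketch): when merging the redeclarations
  //   void fatal(const char *) __attribute__((noreturn));
  //   void fatal(const char *);
  // the merged type is noreturn, whereas 'cond ? p : q' where only one of the
  // two function pointers points to a noreturn function type yields a
  // composite type that is not noreturn.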
10428 bool NoReturn = IsConditionalOperator
10429 ? lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn()
10430 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
10431 if (lbaseInfo.getNoReturn() != NoReturn)
10432 allLTypes = false;
10433 if (rbaseInfo.getNoReturn() != NoReturn)
10434 allRTypes = false;
10435
10436 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(noReturn: NoReturn);
10437
10438 if (lproto && rproto) { // two C99 style function prototypes
10439 assert((AllowCXX ||
10440 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) &&
10441 "C++ shouldn't be here");
10442 // Compatible functions must have the same number of parameters
10443 if (lproto->getNumParams() != rproto->getNumParams())
10444 return {};
10445
10446 // Variadic and non-variadic functions aren't compatible
10447 if (lproto->isVariadic() != rproto->isVariadic())
10448 return {};
10449
10450 if (lproto->getMethodQuals() != rproto->getMethodQuals())
10451 return {};
10452
10453 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos;
10454 bool canUseLeft, canUseRight;
10455 if (!mergeExtParameterInfo(FirstFnType: lproto, SecondFnType: rproto, CanUseFirst&: canUseLeft, CanUseSecond&: canUseRight,
10456 NewParamInfos&: newParamInfos))
10457 return {};
10458
10459 if (!canUseLeft)
10460 allLTypes = false;
10461 if (!canUseRight)
10462 allRTypes = false;
10463
10464 // Check parameter type compatibility
10465 SmallVector<QualType, 10> types;
10466 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) {
10467 QualType lParamType = lproto->getParamType(i).getUnqualifiedType();
10468 QualType rParamType = rproto->getParamType(i).getUnqualifiedType();
10469 QualType paramType = mergeFunctionParameterTypes(
10470 lhs: lParamType, rhs: rParamType, OfBlockPointer, Unqualified);
10471 if (paramType.isNull())
10472 return {};
10473
10474 if (Unqualified)
10475 paramType = paramType.getUnqualifiedType();
10476
10477 types.push_back(Elt: paramType);
10478 if (Unqualified) {
10479 lParamType = lParamType.getUnqualifiedType();
10480 rParamType = rParamType.getUnqualifiedType();
10481 }
10482
10483 if (getCanonicalType(T: paramType) != getCanonicalType(T: lParamType))
10484 allLTypes = false;
10485 if (getCanonicalType(T: paramType) != getCanonicalType(T: rParamType))
10486 allRTypes = false;
10487 }
10488
10489 if (allLTypes) return lhs;
10490 if (allRTypes) return rhs;
10491
10492 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
10493 EPI.ExtInfo = einfo;
10494 EPI.ExtParameterInfos =
10495 newParamInfos.empty() ? nullptr : newParamInfos.data();
10496 return getFunctionType(ResultTy: retType, Args: types, EPI);
10497 }
10498
10499 if (lproto) allRTypes = false;
10500 if (rproto) allLTypes = false;
10501
10502 const FunctionProtoType *proto = lproto ? lproto : rproto;
10503 if (proto) {
10504 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here");
10505 if (proto->isVariadic())
10506 return {};
10507 // Check that the types are compatible with the types that
10508 // would result from default argument promotions (C99 6.7.5.3p15).
10509 // The only types actually affected are promotable integer
10510 // types and floats, which would be passed as a different
10511 // type depending on whether the prototype is visible.
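    // For example (sketch), 'void f(); void f(float x);' and
    // 'void f(); void f(short x);' are both rejected here, because 'float'
    // and 'short' promote to 'double' and 'int' respectively, so a caller
    // that sees only the unprototyped declaration would pass a different type.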
10512 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) {
10513 QualType paramTy = proto->getParamType(i);
10514
10515 // Look at the converted type of enum types, since that is the type used
10516 // to pass enum values.
10517 if (const auto *Enum = paramTy->getAs<EnumType>()) {
10518 paramTy = Enum->getDecl()->getIntegerType();
10519 if (paramTy.isNull())
10520 return {};
10521 }
10522
10523 if (isPromotableIntegerType(paramTy) ||
10524 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy)
10525 return {};
10526 }
10527
10528 if (allLTypes) return lhs;
10529 if (allRTypes) return rhs;
10530
10531 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
10532 EPI.ExtInfo = einfo;
10533 return getFunctionType(ResultTy: retType, Args: proto->getParamTypes(), EPI);
10534 }
10535
10536 if (allLTypes) return lhs;
10537 if (allRTypes) return rhs;
10538 return getFunctionNoProtoType(ResultTy: retType, Info: einfo);
10539}
10540
10541/// Given that we have an enum type and a non-enum type, try to merge them.
10542static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET,
10543 QualType other, bool isBlockReturnType) {
10544 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
10545 // a signed integer type, or an unsigned integer type.
10546 // Compatibility is based on the underlying type, not the promotion
10547 // type.
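  // For example, an enum whose chosen underlying type is 'unsigned int' is
  // compatible with 'unsigned int' but not with 'long', even if 'long' can
  // represent every enumerator (the block-return case below is the one
  // exception).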
10548 QualType underlyingType = ET->getDecl()->getIntegerType();
10549 if (underlyingType.isNull())
10550 return {};
10551 if (Context.hasSameType(T1: underlyingType, T2: other))
10552 return other;
10553
10554 // In block return types, we're more permissive and accept any
10555 // integral type of the same size.
10556 if (isBlockReturnType && other->isIntegerType() &&
10557 Context.getTypeSize(T: underlyingType) == Context.getTypeSize(T: other))
10558 return other;
10559
10560 return {};
10561}
10562
10563QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer,
10564 bool Unqualified, bool BlockReturnType,
10565 bool IsConditionalOperator) {
10566 // For C++ we will not reach this code with reference types (see below),
10567 // for OpenMP variant call overloading we might.
10568 //
10569 // C++ [expr]: If an expression initially has the type "reference to T", the
10570 // type is adjusted to "T" prior to any further analysis, the expression
10571 // designates the object or function denoted by the reference, and the
10572 // expression is an lvalue unless the reference is an rvalue reference and
10573 // the expression is a function call (possibly inside parentheses).
10574 auto *LHSRefTy = LHS->getAs<ReferenceType>();
10575 auto *RHSRefTy = RHS->getAs<ReferenceType>();
10576 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy &&
10577 LHS->getTypeClass() == RHS->getTypeClass())
10578 return mergeTypes(LHS: LHSRefTy->getPointeeType(), RHS: RHSRefTy->getPointeeType(),
10579 OfBlockPointer, Unqualified, BlockReturnType);
10580 if (LHSRefTy || RHSRefTy)
10581 return {};
10582
10583 if (Unqualified) {
10584 LHS = LHS.getUnqualifiedType();
10585 RHS = RHS.getUnqualifiedType();
10586 }
10587
10588 QualType LHSCan = getCanonicalType(T: LHS),
10589 RHSCan = getCanonicalType(T: RHS);
10590
10591 // If two types are identical, they are compatible.
10592 if (LHSCan == RHSCan)
10593 return LHS;
10594
10595 // If the qualifiers are different, the types aren't compatible... mostly.
10596 Qualifiers LQuals = LHSCan.getLocalQualifiers();
10597 Qualifiers RQuals = RHSCan.getLocalQualifiers();
10598 if (LQuals != RQuals) {
10599 // If any of these qualifiers are different, we have a type
10600 // mismatch.
10601 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
10602 LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
10603 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
10604 LQuals.hasUnaligned() != RQuals.hasUnaligned())
10605 return {};
10606
10607 // Exactly one GC qualifier difference is allowed: __strong is
10608 // okay if the other type has no GC qualifier but is an Objective
10609 // C object pointer (i.e. implicitly strong by default). We fix
10610 // this by pretending that the unqualified type was actually
10611 // qualified __strong.
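    // For example (sketch), a plain 'NSObject *' (implicitly __strong under
    // the GC model) merges with '__strong NSObject *', while a __weak
    // qualifier on either side prevents merging.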
10612 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
10613 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
10614 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
10615
10616 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
10617 return {};
10618
10619 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
10620 return mergeTypes(LHS, RHS: getObjCGCQualType(T: RHS, GCAttr: Qualifiers::Strong));
10621 }
10622 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
10623 return mergeTypes(LHS: getObjCGCQualType(T: LHS, GCAttr: Qualifiers::Strong), RHS);
10624 }
10625 return {};
10626 }
10627
10628 // Okay, qualifiers are equal.
10629
10630 Type::TypeClass LHSClass = LHSCan->getTypeClass();
10631 Type::TypeClass RHSClass = RHSCan->getTypeClass();
10632
10633 // We want to consider the two function types to be the same for these
10634 // comparisons, just force one to the other.
10635 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
10636 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
10637
10638 // Same as above for arrays
10639 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
10640 LHSClass = Type::ConstantArray;
10641 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
10642 RHSClass = Type::ConstantArray;
10643
10644 // ObjCInterfaces are just specialized ObjCObjects.
10645 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
10646 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
10647
10648 // Canonicalize ExtVector -> Vector.
10649 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
10650 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
10651
10652 // If the canonical type classes don't match.
10653 if (LHSClass != RHSClass) {
10654 // Note that we only have special rules for turning block enum
10655 // returns into block int returns, not vice-versa.
10656 if (const auto *ETy = LHS->getAs<EnumType>()) {
10657 return mergeEnumWithInteger(Context&: *this, ET: ETy, other: RHS, isBlockReturnType: false);
10658 }
10659 if (const EnumType* ETy = RHS->getAs<EnumType>()) {
10660 return mergeEnumWithInteger(Context&: *this, ET: ETy, other: LHS, isBlockReturnType: BlockReturnType);
10661 }
10662 // Allow a block pointer type to match an 'id' type.
10663 if (OfBlockPointer && !BlockReturnType) {
10664 if (LHS->isObjCIdType() && RHS->isBlockPointerType())
10665 return LHS;
10666 if (RHS->isObjCIdType() && LHS->isBlockPointerType())
10667 return RHS;
10668 }
10669 // Allow __auto_type to match anything; it merges to the type with more
10670 // information.
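    // For example, merging a still-undeduced '__auto_type' with 'int'
    // produces 'int'.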
10671 if (const auto *AT = LHS->getAs<AutoType>()) {
10672 if (!AT->isDeduced() && AT->isGNUAutoType())
10673 return RHS;
10674 }
10675 if (const auto *AT = RHS->getAs<AutoType>()) {
10676 if (!AT->isDeduced() && AT->isGNUAutoType())
10677 return LHS;
10678 }
10679 return {};
10680 }
10681
10682 // The canonical type classes match.
10683 switch (LHSClass) {
10684#define TYPE(Class, Base)
10685#define ABSTRACT_TYPE(Class, Base)
10686#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
10687#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
10688#define DEPENDENT_TYPE(Class, Base) case Type::Class:
10689#include "clang/AST/TypeNodes.inc"
10690 llvm_unreachable("Non-canonical and dependent types shouldn't get here");
10691
10692 case Type::Auto:
10693 case Type::DeducedTemplateSpecialization:
10694 case Type::LValueReference:
10695 case Type::RValueReference:
10696 case Type::MemberPointer:
10697 llvm_unreachable("C++ should never be in mergeTypes");
10698
10699 case Type::ObjCInterface:
10700 case Type::IncompleteArray:
10701 case Type::VariableArray:
10702 case Type::FunctionProto:
10703 case Type::ExtVector:
10704 llvm_unreachable("Types are eliminated above");
10705
10706 case Type::Pointer:
10707 {
10708 // Merge two pointer types, while trying to preserve typedef info
10709 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType();
10710 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType();
10711 if (Unqualified) {
10712 LHSPointee = LHSPointee.getUnqualifiedType();
10713 RHSPointee = RHSPointee.getUnqualifiedType();
10714 }
10715 QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer: false,
10716 Unqualified);
10717 if (ResultType.isNull())
10718 return {};
10719 if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
10720 return LHS;
10721 if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
10722 return RHS;
10723 return getPointerType(T: ResultType);
10724 }
10725 case Type::BlockPointer:
10726 {
10727 // Merge two block pointer types, while trying to preserve typedef info
10728 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType();
10729 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType();
10730 if (Unqualified) {
10731 LHSPointee = LHSPointee.getUnqualifiedType();
10732 RHSPointee = RHSPointee.getUnqualifiedType();
10733 }
10734 if (getLangOpts().OpenCL) {
10735 Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
10736 Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
10737 // Blocks can't be an expression in a ternary operator (OpenCL v2.0
10738 // 6.12.5) thus the following check is asymmetric.
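      // For example, a left-hand pointee in the __generic address space can
      // merge with a right-hand pointee in __private, but not the other way
      // around.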
10739 if (!LHSPteeQual.isAddressSpaceSupersetOf(other: RHSPteeQual))
10740 return {};
10741 LHSPteeQual.removeAddressSpace();
10742 RHSPteeQual.removeAddressSpace();
10743 LHSPointee =
10744 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
10745 RHSPointee =
10746 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
10747 }
10748 QualType ResultType = mergeTypes(LHS: LHSPointee, RHS: RHSPointee, OfBlockPointer,
10749 Unqualified);
10750 if (ResultType.isNull())
10751 return {};
10752 if (getCanonicalType(T: LHSPointee) == getCanonicalType(T: ResultType))
10753 return LHS;
10754 if (getCanonicalType(T: RHSPointee) == getCanonicalType(T: ResultType))
10755 return RHS;
10756 return getBlockPointerType(T: ResultType);
10757 }
10758 case Type::Atomic:
10759 {
10760 // Merge two atomic types, while trying to preserve typedef info.
10761 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
10762 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
10763 if (Unqualified) {
10764 LHSValue = LHSValue.getUnqualifiedType();
10765 RHSValue = RHSValue.getUnqualifiedType();
10766 }
10767 QualType ResultType = mergeTypes(LHS: LHSValue, RHS: RHSValue, OfBlockPointer: false,
10768 Unqualified);
10769 if (ResultType.isNull())
10770 return {};
10771 if (getCanonicalType(T: LHSValue) == getCanonicalType(T: ResultType))
10772 return LHS;
10773 if (getCanonicalType(T: RHSValue) == getCanonicalType(T: ResultType))
10774 return RHS;
10775 return getAtomicType(T: ResultType);
10776 }
10777 case Type::ConstantArray:
10778 {
10779 const ConstantArrayType* LCAT = getAsConstantArrayType(T: LHS);
10780 const ConstantArrayType* RCAT = getAsConstantArrayType(T: RHS);
10781 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
10782 return {};
10783
10784 QualType LHSElem = getAsArrayType(T: LHS)->getElementType();
10785 QualType RHSElem = getAsArrayType(T: RHS)->getElementType();
10786 if (Unqualified) {
10787 LHSElem = LHSElem.getUnqualifiedType();
10788 RHSElem = RHSElem.getUnqualifiedType();
10789 }
10790
10791 QualType ResultType = mergeTypes(LHS: LHSElem, RHS: RHSElem, OfBlockPointer: false, Unqualified);
10792 if (ResultType.isNull())
10793 return {};
10794
10795 const VariableArrayType* LVAT = getAsVariableArrayType(T: LHS);
10796 const VariableArrayType* RVAT = getAsVariableArrayType(T: RHS);
10797
10798 // If either side is a variable array, check that any dimensions whose sizes
10799 // can be computed agree; definite but unequal sizes are incompatible.
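    // For example, 'int[10]' and a variable-length 'int[n]' merge when 'n'
    // cannot be evaluated, or when it folds to exactly 10; dimensions that
    // are both known but unequal make the types unmergeable.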
10800 if (LVAT || RVAT) {
10801 auto SizeFetch = [this](const VariableArrayType* VAT,
10802 const ConstantArrayType* CAT)
10803 -> std::pair<bool,llvm::APInt> {
10804 if (VAT) {
10805 std::optional<llvm::APSInt> TheInt;
10806 Expr *E = VAT->getSizeExpr();
10807 if (E && (TheInt = E->getIntegerConstantExpr(*this)))
10808 return std::make_pair(true, *TheInt);
10809 return std::make_pair(false, llvm::APSInt());
10810 }
10811 if (CAT)
10812 return std::make_pair(true, CAT->getSize());
10813 return std::make_pair(false, llvm::APInt());
10814 };
10815
10816 bool HaveLSize, HaveRSize;
10817 llvm::APInt LSize, RSize;
10818 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT);
10819 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT);
10820 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(I1: LSize, I2: RSize))
10821 return {}; // Definite, but unequal, array dimension
10822 }
10823
10824 if (LCAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
10825 return LHS;
10826 if (RCAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
10827 return RHS;
10828 if (LCAT)
10829 return getConstantArrayType(EltTy: ResultType, ArySizeIn: LCAT->getSize(),
10830 SizeExpr: LCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
10831 if (RCAT)
10832 return getConstantArrayType(EltTy: ResultType, ArySizeIn: RCAT->getSize(),
10833 SizeExpr: RCAT->getSizeExpr(), ASM: ArraySizeModifier(), IndexTypeQuals: 0);
10834 if (LVAT && getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType))
10835 return LHS;
10836 if (RVAT && getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType))
10837 return RHS;
10838 if (LVAT) {
10839 // FIXME: This isn't correct! But tricky to implement because
10840 // the array's size has to be the size of LHS, but the type
10841 // has to be different.
10842 return LHS;
10843 }
10844 if (RVAT) {
10845 // FIXME: This isn't correct! But tricky to implement because
10846 // the array's size has to be the size of RHS, but the type
10847 // has to be different.
10848 return RHS;
10849 }
10850 if (getCanonicalType(T: LHSElem) == getCanonicalType(T: ResultType)) return LHS;
10851 if (getCanonicalType(T: RHSElem) == getCanonicalType(T: ResultType)) return RHS;
10852 return getIncompleteArrayType(elementType: ResultType, ASM: ArraySizeModifier(), elementTypeQuals: 0);
10853 }
10854 case Type::FunctionNoProto:
10855 return mergeFunctionTypes(lhs: LHS, rhs: RHS, OfBlockPointer, Unqualified,
10856 /*AllowCXX=*/false, IsConditionalOperator);
10857 case Type::Record:
10858 case Type::Enum:
10859 return {};
10860 case Type::Builtin:
10861 // Only exactly equal builtin types are compatible, which is tested above.
10862 return {};
10863 case Type::Complex:
10864 // Distinct complex types are incompatible.
10865 return {};
10866 case Type::Vector:
10867 // FIXME: The merged type should be an ExtVector!
10868 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(),
10869 RHSCan->castAs<VectorType>()))
10870 return LHS;
10871 return {};
10872 case Type::ConstantMatrix:
10873 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(),
10874 RHSCan->castAs<ConstantMatrixType>()))
10875 return LHS;
10876 return {};
10877 case Type::ObjCObject: {
10878 // Check if the types are assignment compatible.
10879 // FIXME: This should be type compatibility, e.g. whether
10880 // "LHS x; RHS x;" at global scope is legal.
10881 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(),
10882 RHS->castAs<ObjCObjectType>()))
10883 return LHS;
10884 return {};
10885 }
10886 case Type::ObjCObjectPointer:
10887 if (OfBlockPointer) {
10888 if (canAssignObjCInterfacesInBlockPointer(
10889 LHSOPT: LHS->castAs<ObjCObjectPointerType>(),
10890 RHSOPT: RHS->castAs<ObjCObjectPointerType>(), BlockReturnType))
10891 return LHS;
10892 return {};
10893 }
10894 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(),
10895 RHS->castAs<ObjCObjectPointerType>()))
10896 return LHS;
10897 return {};
10898 case Type::Pipe:
10899 assert(LHS != RHS &&
10900 "Equivalent pipe types should have already been handled!");
10901 return {};
10902 case Type::BitInt: {
10903 // Merge two bit-precise int types, while trying to preserve typedef info.
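    // For example, two '_BitInt(8)'s merge, but '_BitInt(8)' merges neither
    // with 'unsigned _BitInt(8)' nor with '_BitInt(16)'.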
10904 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned();
10905 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned();
10906 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits();
10907 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits();
10908
10909 // As with 'unsigned int' vs. 'int', mismatched signedness means no merged type.
10910 if (LHSUnsigned != RHSUnsigned)
10911 return {};
10912
10913 if (LHSBits != RHSBits)
10914 return {};
10915 return LHS;
10916 }
10917 }
10918
10919 llvm_unreachable("Invalid Type::Class!");
10920}
10921
10922bool ASTContext::mergeExtParameterInfo(
10923 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType,
10924 bool &CanUseFirst, bool &CanUseSecond,
10925 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) {
10926 assert(NewParamInfos.empty() && "param info list not empty");
10927 CanUseFirst = CanUseSecond = true;
10928 bool FirstHasInfo = FirstFnType->hasExtParameterInfos();
10929 bool SecondHasInfo = SecondFnType->hasExtParameterInfos();
10930
10931 // Fast path: if neither type has ext parameter infos, there is nothing to
10932 // merge and no parameter info needs to be recorded.
10933 if (!FirstHasInfo && !SecondHasInfo)
10934 return true;
10935
10936 bool NeedParamInfo = false;
10937 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size()
10938 : SecondFnType->getExtParameterInfos().size();
10939
10940 for (size_t I = 0; I < E; ++I) {
10941 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam;
10942 if (FirstHasInfo)
10943 FirstParam = FirstFnType->getExtParameterInfo(I);
10944 if (SecondHasInfo)
10945 SecondParam = SecondFnType->getExtParameterInfo(I);
10946
10947 // Cannot merge unless everything except the noescape flag matches.
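    // The merged parameter is noescape only when both declarations agree;
    // e.g. a parameter marked __attribute__((noescape)) in just one
    // redeclaration is treated as escaping after the merge, and the
    // declaration that claimed noescape can then no longer be used as-is.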
10948 if (FirstParam.withIsNoEscape(NoEscape: false) != SecondParam.withIsNoEscape(NoEscape: false))
10949 return false;
10950
10951 bool FirstNoEscape = FirstParam.isNoEscape();
10952 bool SecondNoEscape = SecondParam.isNoEscape();
10953 bool IsNoEscape = FirstNoEscape && SecondNoEscape;
10954 NewParamInfos.push_back(Elt: FirstParam.withIsNoEscape(NoEscape: IsNoEscape));
10955 if (NewParamInfos.back().getOpaqueValue())
10956 NeedParamInfo = true;
10957 if (FirstNoEscape != IsNoEscape)
10958 CanUseFirst = false;
10959 if (SecondNoEscape != IsNoEscape)
10960 CanUseSecond = false;
10961 }
10962
10963 if (!NeedParamInfo)
10964 NewParamInfos.clear();
10965
10966 return true;
10967}
10968
10969void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) {
10970 ObjCLayouts[CD] = nullptr;
10971}
10972
10973/// mergeObjCGCQualifiers - Merge the ObjC GC qualifiers of the 'LHS' and
10974/// 'RHS' types and return the merged version; this also handles merging the
10975/// GC qualifiers of function return types.
10976QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
10977 QualType LHSCan = getCanonicalType(T: LHS),
10978 RHSCan = getCanonicalType(T: RHS);
10979 // If two types are identical, they are compatible.
10980 if (LHSCan == RHSCan)
10981 return LHS;
10982 if (RHSCan->isFunctionType()) {
10983 if (!LHSCan->isFunctionType())
10984 return {};
10985 QualType OldReturnType =
10986 cast<FunctionType>(Val: RHSCan.getTypePtr())->getReturnType();
10987 QualType NewReturnType =
10988 cast<FunctionType>(Val: LHSCan.getTypePtr())->getReturnType();
10989 QualType ResReturnType =
10990 mergeObjCGCQualifiers(LHS: NewReturnType, RHS: OldReturnType);
10991 if (ResReturnType.isNull())
10992 return {};
10993 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
10994 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
10995 // In either case, use OldReturnType to build the new function type.
10996 const auto *F = LHS->castAs<FunctionType>();
10997 if (const auto *FPT = cast<FunctionProtoType>(Val: F)) {
10998 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
10999 EPI.ExtInfo = getFunctionExtInfo(t: LHS);
11000 QualType ResultType =
11001 getFunctionType(ResultTy: OldReturnType, Args: FPT->getParamTypes(), EPI);
11002 return ResultType;
11003 }
11004 }
11005 return {};
11006 }
11007
11008 // If the qualifiers are different, the types can still be merged.
11009 Qualifiers LQuals = LHSCan.getLocalQualifiers();
11010 Qualifiers RQuals = RHSCan.getLocalQualifiers();
11011 if (LQuals != RQuals) {
11012 // If any of these qualifiers are different, we have a type mismatch.
11013 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
11014 LQuals.getAddressSpace() != RQuals.getAddressSpace())
11015 return {};
11016
11017 // Exactly one GC qualifier difference is allowed: __strong is
11018 // okay if the other type has no GC qualifier but is an Objective
11019 // C object pointer (i.e. implicitly strong by default). We fix
11020 // this by pretending that the unqualified type was actually
11021 // qualified __strong.
11022 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
11023 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
11024 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
11025
11026 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
11027 return {};
11028
11029 if (GC_L == Qualifiers::Strong)
11030 return LHS;
11031 if (GC_R == Qualifiers::Strong)
11032 return RHS;
11033 return {};
11034 }
11035
11036 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
11037 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
11038 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
11039 QualType ResQT = mergeObjCGCQualifiers(LHS: LHSBaseQT, RHS: RHSBaseQT);
11040 if (ResQT == LHSBaseQT)
11041 return LHS;
11042 if (ResQT == RHSBaseQT)
11043 return RHS;
11044 }
11045 return {};
11046}
11047
11048//===----------------------------------------------------------------------===//
11049// Integer Predicates
11050//===----------------------------------------------------------------------===//
11051
11052unsigned ASTContext::getIntWidth(QualType T) const {
11053 if (const auto *ET = T->getAs<EnumType>())
11054 T = ET->getDecl()->getIntegerType();
11055 if (T->isBooleanType())
11056 return 1;
11057 if (const auto *EIT = T->getAs<BitIntType>())
11058 return EIT->getNumBits();
11059 // For builtin types, just use the standard type sizing method
11060 return (unsigned)getTypeSize(T);
11061}
11062
11063QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
11064 assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
11065 T->isFixedPointType()) &&
11066 "Unexpected type");
11067
11068 // Turn <4 x signed int> -> <4 x unsigned int>
11069 if (const auto *VTy = T->getAs<VectorType>())
11070 return getVectorType(vecType: getCorrespondingUnsignedType(T: VTy->getElementType()),
11071 NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());
11072
11073 // For _BitInt, return an unsigned _BitInt with same width.
11074 if (const auto *EITy = T->getAs<BitIntType>())
11075 return getBitIntType(/*Unsigned=*/IsUnsigned: true, NumBits: EITy->getNumBits());
11076
11077 // For enums, get the underlying integer type of the enum, and let the
11078 // general integer sign-changing code below handle it.
11079 if (const auto *ETy = T->getAs<EnumType>())
11080 T = ETy->getDecl()->getIntegerType();
11081
11082 switch (T->castAs<BuiltinType>()->getKind()) {
11083 case BuiltinType::Char_U:
11084 // Plain `char` is mapped to `unsigned char` even if it's already unsigned
11085 case BuiltinType::Char_S:
11086 case BuiltinType::SChar:
11087 case BuiltinType::Char8:
11088 return UnsignedCharTy;
11089 case BuiltinType::Short:
11090 return UnsignedShortTy;
11091 case BuiltinType::Int:
11092 return UnsignedIntTy;
11093 case BuiltinType::Long:
11094 return UnsignedLongTy;
11095 case BuiltinType::LongLong:
11096 return UnsignedLongLongTy;
11097 case BuiltinType::Int128:
11098 return UnsignedInt128Ty;
11099 // wchar_t is special. It is either signed or not, but when it's signed,
11100 // there's no matching "unsigned wchar_t". Therefore we return the unsigned
11101 // version of its underlying type instead.
11102 case BuiltinType::WChar_S:
11103 return getUnsignedWCharType();
11104
11105 case BuiltinType::ShortAccum:
11106 return UnsignedShortAccumTy;
11107 case BuiltinType::Accum:
11108 return UnsignedAccumTy;
11109 case BuiltinType::LongAccum:
11110 return UnsignedLongAccumTy;
11111 case BuiltinType::SatShortAccum:
11112 return SatUnsignedShortAccumTy;
11113 case BuiltinType::SatAccum:
11114 return SatUnsignedAccumTy;
11115 case BuiltinType::SatLongAccum:
11116 return SatUnsignedLongAccumTy;
11117 case BuiltinType::ShortFract:
11118 return UnsignedShortFractTy;
11119 case BuiltinType::Fract:
11120 return UnsignedFractTy;
11121 case BuiltinType::LongFract:
11122 return UnsignedLongFractTy;
11123 case BuiltinType::SatShortFract:
11124 return SatUnsignedShortFractTy;
11125 case BuiltinType::SatFract:
11126 return SatUnsignedFractTy;
11127 case BuiltinType::SatLongFract:
11128 return SatUnsignedLongFractTy;
11129 default:
11130 assert((T->hasUnsignedIntegerRepresentation() ||
11131 T->isUnsignedFixedPointType()) &&
11132 "Unexpected signed integer or fixed point type");
11133 return T;
11134 }
11135}
11136
11137QualType ASTContext::getCorrespondingSignedType(QualType T) const {
11138 assert((T->hasIntegerRepresentation() || T->isEnumeralType() ||
11139 T->isFixedPointType()) &&
11140 "Unexpected type");
11141
11142 // Turn <4 x unsigned int> -> <4 x signed int>
11143 if (const auto *VTy = T->getAs<VectorType>())
11144 return getVectorType(vecType: getCorrespondingSignedType(T: VTy->getElementType()),
11145 NumElts: VTy->getNumElements(), VecKind: VTy->getVectorKind());
11146
11147 // For _BitInt, return a signed _BitInt with same width.
11148 if (const auto *EITy = T->getAs<BitIntType>())
11149 return getBitIntType(/*Unsigned=*/IsUnsigned: false, NumBits: EITy->getNumBits());
11150
11151 // For enums, get the underlying integer type of the enum, and let the
11152 // general integer sign-changing code below handle it.
11153 if (const auto *ETy = T->getAs<EnumType>())
11154 T = ETy->getDecl()->getIntegerType();
11155
11156 switch (T->castAs<BuiltinType>()->getKind()) {
11157 case BuiltinType::Char_S:
11158 // Plain `char` is mapped to `signed char` even if it's already signed
11159 case BuiltinType::Char_U:
11160 case BuiltinType::UChar:
11161 case BuiltinType::Char8:
11162 return SignedCharTy;
11163 case BuiltinType::UShort:
11164 return ShortTy;
11165 case BuiltinType::UInt:
11166 return IntTy;
11167 case BuiltinType::ULong:
11168 return LongTy;
11169 case BuiltinType::ULongLong:
11170 return LongLongTy;
11171 case BuiltinType::UInt128:
11172 return Int128Ty;
11173 // wchar_t is special. It is either unsigned or not, but when it's unsigned,
11174 // there's no matching "signed wchar_t". Therefore we return the signed
11175 // version of its underlying type instead.
11176 case BuiltinType::WChar_U:
11177 return getSignedWCharType();
11178
11179 case BuiltinType::UShortAccum:
11180 return ShortAccumTy;
11181 case BuiltinType::UAccum:
11182 return AccumTy;
11183 case BuiltinType::ULongAccum:
11184 return LongAccumTy;
11185 case BuiltinType::SatUShortAccum:
11186 return SatShortAccumTy;
11187 case BuiltinType::SatUAccum:
11188 return SatAccumTy;
11189 case BuiltinType::SatULongAccum:
11190 return SatLongAccumTy;
11191 case BuiltinType::UShortFract:
11192 return ShortFractTy;
11193 case BuiltinType::UFract:
11194 return FractTy;
11195 case BuiltinType::ULongFract:
11196 return LongFractTy;
11197 case BuiltinType::SatUShortFract:
11198 return SatShortFractTy;
11199 case BuiltinType::SatUFract:
11200 return SatFractTy;
11201 case BuiltinType::SatULongFract:
11202 return SatLongFractTy;
11203 default:
11204 assert(
11205 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
11206 "Unexpected signed integer or fixed point type");
11207 return T;
11208 }
11209}
11210
11211ASTMutationListener::~ASTMutationListener() = default;
11212
11213void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
11214 QualType ReturnType) {}
11215
11216//===----------------------------------------------------------------------===//
11217// Builtin Type Computation
11218//===----------------------------------------------------------------------===//
11219
11220/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
11221/// pointer over the consumed characters. This returns the resultant type. If
11222/// AllowTypeModifiers is false then modifiers like '*' are not parsed, just basic
11223/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
11224/// a vector of "i*".
11225///
11226/// RequiresICE is filled in on return to indicate whether the value is required
11227/// to be an Integer Constant Expression.
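///
/// For example, under this encoding "iC*" decodes to 'const int *', "Ii"
/// decodes to an 'int' argument that must be an integer constant expression,
/// and "LUi" decodes to 'unsigned long'.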
11228static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
11229 ASTContext::GetBuiltinTypeError &Error,
11230 bool &RequiresICE,
11231 bool AllowTypeModifiers) {
11232 // Modifiers.
11233 int HowLong = 0;
11234 bool Signed = false, Unsigned = false;
11235 RequiresICE = false;
11236
11237 // Read the prefixed modifiers first.
11238 bool Done = false;
11239 #ifndef NDEBUG
11240 bool IsSpecial = false;
11241 #endif
11242 while (!Done) {
11243 switch (*Str++) {
11244 default: Done = true; --Str; break;
11245 case 'I':
11246 RequiresICE = true;
11247 break;
11248 case 'S':
11249 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
11250 assert(!Signed && "Can't use 'S' modifier multiple times!");
11251 Signed = true;
11252 break;
11253 case 'U':
11254 assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
11255 assert(!Unsigned && "Can't use 'U' modifier multiple times!");
11256 Unsigned = true;
11257 break;
11258 case 'L':
11259 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers");
11260 assert(HowLong <= 2 && "Can't have LLLL modifier");
11261 ++HowLong;
11262 break;
11263 case 'N':
11264 // 'N' behaves like 'L' on all non-LP64 targets and like 'int' otherwise.
11265 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
11266 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!");
11267 #ifndef NDEBUG
11268 IsSpecial = true;
11269 #endif
11270 if (Context.getTargetInfo().getLongWidth() == 32)
11271 ++HowLong;
11272 break;
11273 case 'W':
11274 // This modifier represents int64 type.
11275 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
11276 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!");
11277 #ifndef NDEBUG
11278 IsSpecial = true;
11279 #endif
11280 switch (Context.getTargetInfo().getInt64Type()) {
11281 default:
11282 llvm_unreachable("Unexpected integer type");
11283 case TargetInfo::SignedLong:
11284 HowLong = 1;
11285 break;
11286 case TargetInfo::SignedLongLong:
11287 HowLong = 2;
11288 break;
11289 }
11290 break;
11291 case 'Z':
11292 // This modifier represents int32 type.
11293 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
11294 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!");
11295 #ifndef NDEBUG
11296 IsSpecial = true;
11297 #endif
11298 switch (Context.getTargetInfo().getIntTypeByWidth(BitWidth: 32, IsSigned: true)) {
11299 default:
11300 llvm_unreachable("Unexpected integer type");
11301 case TargetInfo::SignedInt:
11302 HowLong = 0;
11303 break;
11304 case TargetInfo::SignedLong:
11305 HowLong = 1;
11306 break;
11307 case TargetInfo::SignedLongLong:
11308 HowLong = 2;
11309 break;
11310 }
11311 break;
11312 case 'O':
11313 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!");
11314 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!");
11315 #ifndef NDEBUG
11316 IsSpecial = true;
11317 #endif
11318 if (Context.getLangOpts().OpenCL)
11319 HowLong = 1;
11320 else
11321 HowLong = 2;
11322 break;
11323 }
11324 }
11325
11326 QualType Type;
11327
11328 // Read the base type.
11329 switch (*Str++) {
11330 default: llvm_unreachable("Unknown builtin type letter!");
11331 case 'x':
11332 assert(HowLong == 0 && !Signed && !Unsigned &&
11333 "Bad modifiers used with 'x'!");
11334 Type = Context.Float16Ty;
11335 break;
11336 case 'y':
11337 assert(HowLong == 0 && !Signed && !Unsigned &&
11338 "Bad modifiers used with 'y'!");
11339 Type = Context.BFloat16Ty;
11340 break;
11341 case 'v':
11342 assert(HowLong == 0 && !Signed && !Unsigned &&
11343 "Bad modifiers used with 'v'!");
11344 Type = Context.VoidTy;
11345 break;
11346 case 'h':
11347 assert(HowLong == 0 && !Signed && !Unsigned &&
11348 "Bad modifiers used with 'h'!");
11349 Type = Context.HalfTy;
11350 break;
11351 case 'f':
11352 assert(HowLong == 0 && !Signed && !Unsigned &&
11353 "Bad modifiers used with 'f'!");
11354 Type = Context.FloatTy;
11355 break;
11356 case 'd':
11357 assert(HowLong < 3 && !Signed && !Unsigned &&
11358 "Bad modifiers used with 'd'!");
11359 if (HowLong == 1)
11360 Type = Context.LongDoubleTy;
11361 else if (HowLong == 2)
11362 Type = Context.Float128Ty;
11363 else
11364 Type = Context.DoubleTy;
11365 break;
11366 case 's':
11367 assert(HowLong == 0 && "Bad modifiers used with 's'!");
11368 if (Unsigned)
11369 Type = Context.UnsignedShortTy;
11370 else
11371 Type = Context.ShortTy;
11372 break;
11373 case 'i':
11374 if (HowLong == 3)
11375 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
11376 else if (HowLong == 2)
11377 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
11378 else if (HowLong == 1)
11379 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
11380 else
11381 Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
11382 break;
11383 case 'c':
11384 assert(HowLong == 0 && "Bad modifiers used with 'c'!");
11385 if (Signed)
11386 Type = Context.SignedCharTy;
11387 else if (Unsigned)
11388 Type = Context.UnsignedCharTy;
11389 else
11390 Type = Context.CharTy;
11391 break;
11392 case 'b': // boolean
11393 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
11394 Type = Context.BoolTy;
11395 break;
11396 case 'z': // size_t.
11397 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
11398 Type = Context.getSizeType();
11399 break;
11400 case 'w': // wchar_t.
11401 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
11402 Type = Context.getWideCharType();
11403 break;
11404 case 'F':
11405 Type = Context.getCFConstantStringType();
11406 break;
11407 case 'G':
11408 Type = Context.getObjCIdType();
11409 break;
11410 case 'H':
11411 Type = Context.getObjCSelType();
11412 break;
11413 case 'M':
11414 Type = Context.getObjCSuperType();
11415 break;
11416 case 'a':
11417 Type = Context.getBuiltinVaListType();
11418 assert(!Type.isNull() && "builtin va list type not initialized!");
11419 break;
11420 case 'A':
11421 // This is a "reference" to a va_list; however, what exactly
11422 // this means depends on how va_list is defined. There are two
11423 // different kinds of va_list: ones passed by value, and ones
11424 // passed by reference. An example of a by-value va_list is
11425 // x86, where va_list is a char*. An example of by-ref va_list
11426 // is x86-64, where va_list is a __va_list_tag[1]. For x86,
11427 // we want this argument to be a char*&; for x86-64, we want
11428 // it to be a __va_list_tag*.
11429 Type = Context.getBuiltinVaListType();
11430 assert(!Type.isNull() && "builtin va list type not initialized!");
11431 if (Type->isArrayType())
11432 Type = Context.getArrayDecayedType(Ty: Type);
11433 else
11434 Type = Context.getLValueReferenceType(T: Type);
11435 break;
11436 case 'q': {
11437 char *End;
11438 unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
11439 assert(End != Str && "Missing vector size");
11440 Str = End;
11441
11442 QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
11443 RequiresICE, AllowTypeModifiers: false);
11444 assert(!RequiresICE && "Can't require vector ICE");
11445
11446 Type = Context.getScalableVectorType(EltTy: ElementType, NumElts: NumElements);
11447 break;
11448 }
11449 case 'Q': {
11450 switch (*Str++) {
11451 case 'a': {
11452 Type = Context.SveCountTy;
11453 break;
11454 }
11455 default:
11456 llvm_unreachable("Unexpected target builtin type");
11457 }
11458 break;
11459 }
11460 case 'V': {
11461 char *End;
11462 unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
11463 assert(End != Str && "Missing vector size");
11464 Str = End;
11465
11466 QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
11467 RequiresICE, AllowTypeModifiers: false);
11468 assert(!RequiresICE && "Can't require vector ICE");
11469
11470 // TODO: No way to make AltiVec vectors in builtins yet.
11471 Type = Context.getVectorType(vecType: ElementType, NumElts: NumElements, VecKind: VectorKind::Generic);
11472 break;
11473 }
11474 case 'E': {
11475 char *End;
11476
11477 unsigned NumElements = strtoul(nptr: Str, endptr: &End, base: 10);
11478 assert(End != Str && "Missing vector size");
11479
11480 Str = End;
11481
11482 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
11483 AllowTypeModifiers: false);
11484 Type = Context.getExtVectorType(vecType: ElementType, NumElts: NumElements);
11485 break;
11486 }
11487 case 'X': {
11488 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
11489 AllowTypeModifiers: false);
11490 assert(!RequiresICE && "Can't require complex ICE");
11491 Type = Context.getComplexType(T: ElementType);
11492 break;
11493 }
11494 case 'Y':
11495 Type = Context.getPointerDiffType();
11496 break;
11497 case 'P':
11498 Type = Context.getFILEType();
11499 if (Type.isNull()) {
11500 Error = ASTContext::GE_Missing_stdio;
11501 return {};
11502 }
11503 break;
11504 case 'J':
11505 if (Signed)
11506 Type = Context.getsigjmp_bufType();
11507 else
11508 Type = Context.getjmp_bufType();
11509
11510 if (Type.isNull()) {
11511 Error = ASTContext::GE_Missing_setjmp;
11512 return {};
11513 }
11514 break;
11515 case 'K':
11516 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
11517 Type = Context.getucontext_tType();
11518
11519 if (Type.isNull()) {
11520 Error = ASTContext::GE_Missing_ucontext;
11521 return {};
11522 }
11523 break;
11524 case 'p':
11525 Type = Context.getProcessIDType();
11526 break;
11527 }
11528
11529 // If there are modifiers and if we're allowed to parse them, go for it.
11530 Done = !AllowTypeModifiers;
11531 while (!Done) {
11532 switch (char c = *Str++) {
11533 default: Done = true; --Str; break;
11534 case '*':
11535 case '&': {
11536 // Both pointers and references can have their pointee types
11537 // qualified with an address space.
11538 char *End;
11539 unsigned AddrSpace = strtoul(nptr: Str, endptr: &End, base: 10);
11540 if (End != Str) {
11541 // Note AddrSpace == 0 is not the same as an unspecified address space.
11542 Type = Context.getAddrSpaceQualType(
11543 T: Type,
11544 AddressSpace: Context.getLangASForBuiltinAddressSpace(AS: AddrSpace));
11545 Str = End;
11546 }
11547 if (c == '*')
11548 Type = Context.getPointerType(T: Type);
11549 else
11550 Type = Context.getLValueReferenceType(T: Type);
11551 break;
11552 }
11553 // FIXME: There's no way to have a built-in with an rvalue ref arg.
11554 case 'C':
11555 Type = Type.withConst();
11556 break;
11557 case 'D':
11558 Type = Context.getVolatileType(T: Type);
11559 break;
11560 case 'R':
11561 Type = Type.withRestrict();
11562 break;
11563 }
11564 }
11565
11566 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
11567 "Integer constant 'I' type must be an integer");
11568
11569 return Type;
11570}
11571
11572// On some targets such as PowerPC, some of the builtins are defined with custom
11573// type descriptors for target-dependent types. These descriptors are decoded in
11574// other functions, but it may be useful to be able to fall back to default
11575// descriptor decoding to define builtins mixing target-dependent and target-
11576// independent types. This function allows decoding one type descriptor with
11577// default decoding.
11578QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context,
11579 GetBuiltinTypeError &Error, bool &RequireICE,
11580 bool AllowTypeModifiers) const {
11581 return DecodeTypeFromStr(Str, Context, Error, RequiresICE&: RequireICE, AllowTypeModifiers);
11582}
11583
11584/// GetBuiltinType - Return the type for the specified builtin.
11585QualType ASTContext::GetBuiltinType(unsigned Id,
11586 GetBuiltinTypeError &Error,
11587 unsigned *IntegerConstantArgs) const {
11588 const char *TypeStr = BuiltinInfo.getTypeString(ID: Id);
11589 if (TypeStr[0] == '\0') {
11590 Error = GE_Missing_type;
11591 return {};
11592 }
11593
11594 SmallVector<QualType, 8> ArgTypes;
11595
11596 bool RequiresICE = false;
11597 Error = GE_None;
11598 QualType ResType = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error,
11599 RequiresICE, AllowTypeModifiers: true);
11600 if (Error != GE_None)
11601 return {};
11602
11603 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
11604
11605 while (TypeStr[0] && TypeStr[0] != '.') {
11606 QualType Ty = DecodeTypeFromStr(Str&: TypeStr, Context: *this, Error, RequiresICE, AllowTypeModifiers: true);
11607 if (Error != GE_None)
11608 return {};
11609
11610 // If this argument is required to be an IntegerConstantExpression and the
11611 // caller cares, fill in the bitmask we return.
11612 if (RequiresICE && IntegerConstantArgs)
11613 *IntegerConstantArgs |= 1 << ArgTypes.size();
11614
11615 // Do array -> pointer decay. The builtin should use the decayed type.
11616 if (Ty->isArrayType())
11617 Ty = getArrayDecayedType(Ty);
11618
11619 ArgTypes.push_back(Elt: Ty);
11620 }
11621
11622 if (Id == Builtin::BI__GetExceptionInfo)
11623 return {};
11624
11625 assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
11626 "'.' should only occur at end of builtin type list!");
11627
11628 bool Variadic = (TypeStr[0] == '.');
11629
11630 FunctionType::ExtInfo EI(getDefaultCallingConvention(
11631 IsVariadic: Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true));
11632 if (BuiltinInfo.isNoReturn(ID: Id)) EI = EI.withNoReturn(noReturn: true);
11633
11634
11635 // We really shouldn't be making a no-proto type here.
11636 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
11637 return getFunctionNoProtoType(ResultTy: ResType, Info: EI);
11638
11639 FunctionProtoType::ExtProtoInfo EPI;
11640 EPI.ExtInfo = EI;
11641 EPI.Variadic = Variadic;
11642 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(ID: Id))
11643 EPI.ExceptionSpec.Type =
11644 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
11645
11646 return getFunctionType(ResultTy: ResType, Args: ArgTypes, EPI);
11647}
11648
11649static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
11650 const FunctionDecl *FD) {
11651 if (!FD->isExternallyVisible())
11652 return GVA_Internal;
11653
11654 // Non-user-provided functions get emitted as weak definitions with every
11655 // use, no matter whether they've been explicitly instantiated etc.
11656 if (!FD->isUserProvided())
11657 return GVA_DiscardableODR;
11658
11659 GVALinkage External;
11660 switch (FD->getTemplateSpecializationKind()) {
11661 case TSK_Undeclared:
11662 case TSK_ExplicitSpecialization:
11663 External = GVA_StrongExternal;
11664 break;
11665
11666 case TSK_ExplicitInstantiationDefinition:
11667 return GVA_StrongODR;
11668
11669 // C++11 [temp.explicit]p10:
11670 // [ Note: The intent is that an inline function that is the subject of
11671 // an explicit instantiation declaration will still be implicitly
11672 // instantiated when used so that the body can be considered for
11673 // inlining, but that no out-of-line copy of the inline function would be
11674 // generated in the translation unit. -- end note ]
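  //
  // For example (an illustrative sketch), 'extern template int max<int>(int, int);'
  // lets callers still consider the inline body of max<int> for inlining,
  // while the out-of-line copy is provided by the translation unit that
  // contains the explicit instantiation definition.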
11675 case TSK_ExplicitInstantiationDeclaration:
11676 return GVA_AvailableExternally;
11677
11678 case TSK_ImplicitInstantiation:
11679 External = GVA_DiscardableODR;
11680 break;
11681 }
11682
11683 if (!FD->isInlined())
11684 return External;
11685
11686 if ((!Context.getLangOpts().CPlusPlus &&
11687 !Context.getTargetInfo().getCXXABI().isMicrosoft() &&
11688 !FD->hasAttr<DLLExportAttr>()) ||
11689 FD->hasAttr<GNUInlineAttr>()) {
11690 // FIXME: This doesn't match gcc's behavior for dllexport inline functions.
11691
11692 // GNU or C99 inline semantics. Determine whether this symbol should be
11693 // externally visible.
11694 if (FD->isInlineDefinitionExternallyVisible())
11695 return External;
11696
11697 // C99 inline semantics, where the symbol is not externally visible.
11698 return GVA_AvailableExternally;
11699 }
11700
11701 // Functions specified with extern and inline in -fms-compatibility mode
11702 // forcibly get emitted. While the body of the function cannot be later
11703 // replaced, the function definition cannot be discarded.
11704 if (FD->isMSExternInline())
11705 return GVA_StrongODR;
11706
11707 if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
11708 isa<CXXConstructorDecl>(Val: FD) &&
11709 cast<CXXConstructorDecl>(Val: FD)->isInheritingConstructor())
11710 // Our approach to inheriting constructors is fundamentally different from
11711 // that used by the MS ABI, so keep our inheriting constructor thunks
11712 // internal rather than trying to pick an unambiguous mangling for them.
11713 return GVA_Internal;
11714
11715 return GVA_DiscardableODR;
11716}
11717
11718static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
11719 const Decl *D, GVALinkage L) {
11720 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx
11721 // dllexport/dllimport on inline functions.
11722 if (D->hasAttr<DLLImportAttr>()) {
11723 if (L == GVA_DiscardableODR || L == GVA_StrongODR)
11724 return GVA_AvailableExternally;
11725 } else if (D->hasAttr<DLLExportAttr>()) {
11726 if (L == GVA_DiscardableODR)
11727 return GVA_StrongODR;
11728 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) {
11729 // Device-side functions with __global__ attribute must always be
11730 // visible externally so they can be launched from host.
11731 if (D->hasAttr<CUDAGlobalAttr>() &&
11732 (L == GVA_DiscardableODR || L == GVA_Internal))
11733 return GVA_StrongODR;
11734 // Single-source offloading languages like CUDA/HIP need to be able to
11735 // access static device variables from host code of the same compilation
11736 // unit. This is done by externalizing the static variable under a name
11737 // that is shared between the host and device compilations of a given
11738 // compilation unit, while remaining distinct across different compilation
11739 // units.
11740 if (Context.shouldExternalize(D))
11741 return GVA_StrongExternal;
11742 }
11743 return L;
11744}
11745
11746/// Adjust the GVALinkage for a declaration based on what an external AST source
11747/// knows about whether there can be other definitions of this declaration.
11748static GVALinkage
11749adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
11750 GVALinkage L) {
11751 ExternalASTSource *Source = Ctx.getExternalSource();
11752 if (!Source)
11753 return L;
11754
11755 switch (Source->hasExternalDefinitions(D)) {
11756 case ExternalASTSource::EK_Never:
11757 // Other translation units rely on us to provide the definition.
11758 if (L == GVA_DiscardableODR)
11759 return GVA_StrongODR;
11760 break;
11761
11762 case ExternalASTSource::EK_Always:
11763 return GVA_AvailableExternally;
11764
11765 case ExternalASTSource::EK_ReplyHazy:
11766 break;
11767 }
11768 return L;
11769}
11770
11771GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
11772 return adjustGVALinkageForExternalDefinitionKind(*this, FD,
11773 adjustGVALinkageForAttributes(*this, FD,
11774 basicGVALinkageForFunction(Context: *this, FD)));
11775}
11776
11777static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
11778 const VarDecl *VD) {
11779 // As an extension for interactive REPLs, make sure constant variables are
11780 // emitted only once, rather than letting
11781 // LinkageComputer::getLVForNamespaceScopeDecl mark them as internal.
11782 if (Context.getLangOpts().CPlusPlus &&
11783 Context.getLangOpts().IncrementalExtensions &&
11784 VD->getType().isConstQualified() &&
11785 !VD->getType().isVolatileQualified() && !VD->isInline() &&
11786 !isa<VarTemplateSpecializationDecl>(Val: VD) && !VD->getDescribedVarTemplate())
11787 return GVA_DiscardableODR;
11788
11789 if (!VD->isExternallyVisible())
11790 return GVA_Internal;
11791
11792 if (VD->isStaticLocal()) {
11793 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
11794 while (LexicalContext && !isa<FunctionDecl>(Val: LexicalContext))
11795 LexicalContext = LexicalContext->getLexicalParent();
11796
11797 // ObjC Blocks can create local variables that don't have a FunctionDecl
11798 // LexicalContext.
11799 if (!LexicalContext)
11800 return GVA_DiscardableODR;
11801
11802 // Otherwise, let the static local variable inherit its linkage from the
11803 // nearest enclosing function.
11804 auto StaticLocalLinkage =
11805 Context.GetGVALinkageForFunction(FD: cast<FunctionDecl>(Val: LexicalContext));
11806
11807 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
11808 // be emitted in any object with references to the symbol for the object it
11809 // contains, whether inline or out-of-line."
11810 // Similar behavior is observed with MSVC. An alternative ABI could use
11811 // StrongODR/AvailableExternally to match the function, but none are
11812 // known/supported currently.
11813 if (StaticLocalLinkage == GVA_StrongODR ||
11814 StaticLocalLinkage == GVA_AvailableExternally)
11815 return GVA_DiscardableODR;
11816 return StaticLocalLinkage;
11817 }
11818
11819 // MSVC treats in-class initialized static data members as definitions.
11820 // By giving them non-strong linkage, out-of-line definitions won't
11821 // cause link errors.
11822 if (Context.isMSStaticDataMemberInlineDefinition(VD))
11823 return GVA_DiscardableODR;
11824
11825 // Most non-template variables have strong linkage; inline variables are
11826 // linkonce_odr or (occasionally, for compatibility) weak_odr.
11827 GVALinkage StrongLinkage;
11828 switch (Context.getInlineVariableDefinitionKind(VD)) {
11829 case ASTContext::InlineVariableDefinitionKind::None:
11830 StrongLinkage = GVA_StrongExternal;
11831 break;
11832 case ASTContext::InlineVariableDefinitionKind::Weak:
11833 case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
11834 StrongLinkage = GVA_DiscardableODR;
11835 break;
11836 case ASTContext::InlineVariableDefinitionKind::Strong:
11837 StrongLinkage = GVA_StrongODR;
11838 break;
11839 }
11840
11841 switch (VD->getTemplateSpecializationKind()) {
11842 case TSK_Undeclared:
11843 return StrongLinkage;
11844
11845 case TSK_ExplicitSpecialization:
11846 return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
11847 VD->isStaticDataMember()
11848 ? GVA_StrongODR
11849 : StrongLinkage;
11850
11851 case TSK_ExplicitInstantiationDefinition:
11852 return GVA_StrongODR;
11853
11854 case TSK_ExplicitInstantiationDeclaration:
11855 return GVA_AvailableExternally;
11856
11857 case TSK_ImplicitInstantiation:
11858 return GVA_DiscardableODR;
11859 }
11860
11861 llvm_unreachable("Invalid Linkage!");
11862}
11863
11864GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const {
11865 return adjustGVALinkageForExternalDefinitionKind(*this, VD,
11866 adjustGVALinkageForAttributes(*this, VD,
          basicGVALinkageForVariable(*this, VD)));
11868}
11869
11870bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
11872 if (!VD->isFileVarDecl())
11873 return false;
11874 // Global named register variables (GNU extension) are never emitted.
11875 if (VD->getStorageClass() == SC_Register)
11876 return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
11879 return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
11881 // We never need to emit an uninstantiated function template.
11882 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
11883 return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
11897 return true;
11898 else
11899 return false;
11900
11901 // If this is a member of a class template, we do not need to emit it.
11902 if (D->getDeclContext()->isDependentContext())
11903 return false;
11904
11905 // Weak references don't produce any output by themselves.
11906 if (D->hasAttr<WeakRefAttr>())
11907 return false;
11908
11909 // Aliases and used decls are required.
11910 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
11911 return true;
11912
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
11914 // Forward declarations aren't required.
11915 if (!FD->doesThisDeclarationHaveABody())
11916 return FD->doesDeclarationForceExternallyVisibleDefinition();
11917
11918 // Constructors and destructors are required.
11919 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
11920 return true;
11921
11922 // The key function for a class is required. This rule only comes
11923 // into play when inline functions can be key functions, though.
11924 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
11926 const CXXRecordDecl *RD = MD->getParent();
11927 if (MD->isOutOfLine() && RD->isDynamicClass()) {
11928 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
11929 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
11930 return true;
11931 }
11932 }
11933 }
11934
11935 GVALinkage Linkage = GetGVALinkageForFunction(FD);
11936
11937 // static, static inline, always_inline, and extern inline functions can
11938 // always be deferred. Normal inline functions can be deferred in C99/C++.
11939 // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
11941 }
11942
  const auto *VD = cast<VarDecl>(D);
11944 assert(VD->isFileVarDecl() && "Expected file scoped var");
11945
11946 // If the decl is marked as `declare target to`, it should be emitted for the
11947 // host and for the device.
11948 if (LangOpts.OpenMP &&
11949 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
11950 return true;
11951
11952 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
11953 !isMSStaticDataMemberInlineDefinition(VD))
11954 return false;
11955
11956 // Variables in other module units shouldn't be forced to be emitted.
11957 if (VD->isInAnotherModuleUnit())
11958 return false;
11959
11960 // Variables that can be needed in other TUs are required.
11961 auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
11963 return true;
11964
11965 // We never need to emit a variable that is available in another TU.
11966 if (Linkage == GVA_AvailableExternally)
11967 return false;
11968
11969 // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
11971 return true;
11972
11973 // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
11975 // We can get a value-dependent initializer during error recovery.
11976 (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
11977 return true;
11978
11979 // Likewise, variables with tuple-like bindings are required if their
11980 // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
11982 for (const auto *BD : DD->bindings())
11983 if (const auto *BindingVD = BD->getHoldingVar())
11984 if (DeclMustBeEmitted(BindingVD))
11985 return true;
11986
11987 return false;
11988}
11989
11990void ASTContext::forEachMultiversionedFunctionVersion(
11991 const FunctionDecl *FD,
11992 llvm::function_ref<void(FunctionDecl *)> Pred) const {
11993 assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
11994 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
11995 FD = FD->getMostRecentDecl();
11996 // FIXME: The order of traversal here matters and depends on the order of
11997 // lookup results, which happens to be (mostly) oldest-to-newest, but we
11998 // shouldn't rely on that.
11999 for (auto *CurDecl :
12000 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
12001 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
12002 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
12003 !SeenDecls.contains(CurFD)) {
12004 SeenDecls.insert(CurFD);
12005 Pred(CurFD);
12006 }
12007 }
12008}
12009
12010CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
12011 bool IsCXXMethod,
12012 bool IsBuiltin) const {
12013 // Pass through to the C++ ABI object
12014 if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);
12016
  // Builtins ignore the user-specified default calling convention and always
  // use the target's default calling convention.
12019 if (!IsBuiltin) {
12020 switch (LangOpts.getDefaultCallingConv()) {
12021 case LangOptions::DCC_None:
12022 break;
12023 case LangOptions::DCC_CDecl:
12024 return CC_C;
12025 case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
12027 return CC_X86FastCall;
12028 break;
12029 case LangOptions::DCC_StdCall:
12030 if (!IsVariadic)
12031 return CC_X86StdCall;
12032 break;
12033 case LangOptions::DCC_VectorCall:
12034 // __vectorcall cannot be applied to variadic functions.
12035 if (!IsVariadic)
12036 return CC_X86VectorCall;
12037 break;
12038 case LangOptions::DCC_RegCall:
12039 // __regcall cannot be applied to variadic functions.
12040 if (!IsVariadic)
12041 return CC_X86RegCall;
12042 break;
12043 case LangOptions::DCC_RtdCall:
12044 if (!IsVariadic)
12045 return CC_M68kRTD;
12046 break;
12047 }
12048 }
12049 return Target->getDefaultCallingConv();
12050}
12051
12052bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
12053 // Pass through to the C++ ABI object
12054 return ABI->isNearlyEmpty(RD);
12055}
12056
12057VTableContextBase *ASTContext::getVTableContext() {
12058 if (!VTContext.get()) {
12059 auto ABI = Target->getCXXABI();
12060 if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
12062 else {
12063 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
12064 ? ItaniumVTableContext::Relative
12065 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
12067 }
12068 }
12069 return VTContext.get();
12070}
12071
12072MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
12073 if (!T)
12074 T = Target;
12075 switch (T->getCXXABI().getKind()) {
12076 case TargetCXXABI::AppleARM64:
12077 case TargetCXXABI::Fuchsia:
12078 case TargetCXXABI::GenericAArch64:
12079 case TargetCXXABI::GenericItanium:
12080 case TargetCXXABI::GenericARM:
12081 case TargetCXXABI::GenericMIPS:
12082 case TargetCXXABI::iOS:
12083 case TargetCXXABI::WebAssembly:
12084 case TargetCXXABI::WatchOS:
12085 case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
12087 case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
12089 }
12090 llvm_unreachable("Unsupported ABI");
12091}
12092
12093MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
12094 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
12095 "Device mangle context does not support Microsoft mangling.");
12096 switch (T.getCXXABI().getKind()) {
12097 case TargetCXXABI::AppleARM64:
12098 case TargetCXXABI::Fuchsia:
12099 case TargetCXXABI::GenericAArch64:
12100 case TargetCXXABI::GenericItanium:
12101 case TargetCXXABI::GenericARM:
12102 case TargetCXXABI::GenericMIPS:
12103 case TargetCXXABI::iOS:
12104 case TargetCXXABI::WebAssembly:
12105 case TargetCXXABI::WatchOS:
12106 case TargetCXXABI::XL:
12107 return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
12111 return RD->getDeviceLambdaManglingNumber();
12112 return std::nullopt;
12113 },
12114 /*IsAux=*/true);
12115 case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics(),
12117 /*IsAux=*/true);
12118 }
12119 llvm_unreachable("Unsupported ABI");
12120}
12121
12122CXXABI::~CXXABI() = default;
12123
12124size_t ASTContext::getSideTableAllocatedMemory() const {
12125 return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
12138}
12139
/// getIntTypeForBitwidth -
/// Returns an integer type of the specified bit width and signedness.
/// Returns an empty type if the target has no matching integer type.
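/// For illustration: on a typical 64-bit target, getIntTypeForBitwidth(32,
/// /*Signed=*/1) yields 'int', while getIntTypeForBitwidth(128, /*Signed=*/0)
/// falls back to UnsignedInt128Ty.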
12144QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
12145 unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
12148 if (!QualTy && DestWidth == 128)
12149 return Signed ? Int128Ty : UnsignedInt128Ty;
12150 return QualTy;
12151}
12152
/// getRealTypeForBitwidth -
/// Returns a floating-point type of the specified bit width.
/// Returns an empty type if the target has no matching floating-point type.
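/// For illustration: on most targets, getRealTypeForBitwidth(64,
/// FloatModeKind::NoFloat) is expected to yield DoubleTy.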
12156QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
12157 FloatModeKind ExplicitType) const {
12158 FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
12160 switch (Ty) {
12161 case FloatModeKind::Half:
12162 return HalfTy;
12163 case FloatModeKind::Float:
12164 return FloatTy;
12165 case FloatModeKind::Double:
12166 return DoubleTy;
12167 case FloatModeKind::LongDouble:
12168 return LongDoubleTy;
12169 case FloatModeKind::Float128:
12170 return Float128Ty;
12171 case FloatModeKind::Ibm128:
12172 return Ibm128Ty;
12173 case FloatModeKind::NoFloat:
12174 return {};
12175 }
12176
12177 llvm_unreachable("Unhandled TargetInfo::RealType value");
12178}
12179
12180void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
12181 if (Number > 1)
12182 MangleNumbers[ND] = Number;
12183}
12184
12185unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
12186 bool ForAuxTarget) const {
  auto I = MangleNumbers.find(ND);
12188 unsigned Res = I != MangleNumbers.end() ? I->second : 1;
  // CUDA/HIP host compilation encodes the host and device mangling numbers
  // as the lower and upper halves of a 32-bit integer.
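  // For example, a packed value of 0x00020003 decodes to 3 for the host
  // (lower half) and 2 for the aux/device target (upper half).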
12191 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
12192 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
12193 } else {
12194 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
12195 "number for aux target");
12196 }
12197 return Res > 1 ? Res : 1;
12198}
12199
12200void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
12201 if (Number > 1)
12202 StaticLocalNumbers[VD] = Number;
12203}
12204
12205unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
12207 return I != StaticLocalNumbers.end() ? I->second : 1;
12208}
12209
12210MangleNumberingContext &
12211ASTContext::getManglingNumberContext(const DeclContext *DC) {
12212 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
12213 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
12214 if (!MCtx)
12215 MCtx = createMangleNumberingContext();
12216 return *MCtx;
12217}
12218
12219MangleNumberingContext &
12220ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
12221 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
12222 std::unique_ptr<MangleNumberingContext> &MCtx =
12223 ExtraMangleNumberingContexts[D];
12224 if (!MCtx)
12225 MCtx = createMangleNumberingContext();
12226 return *MCtx;
12227}
12228
12229std::unique_ptr<MangleNumberingContext>
12230ASTContext::createMangleNumberingContext() const {
12231 return ABI->createMangleNumberingContext();
12232}
12233
12234const CXXConstructorDecl *
12235ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
12236 return ABI->getCopyConstructorForExceptionObject(
12237 cast<CXXRecordDecl>(RD->getFirstDecl()));
12238}
12239
12240void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
12241 CXXConstructorDecl *CD) {
12242 return ABI->addCopyConstructorForExceptionObject(
12243 cast<CXXRecordDecl>(RD->getFirstDecl()),
12244 cast<CXXConstructorDecl>(CD->getFirstDecl()));
12245}
12246
12247void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
12248 TypedefNameDecl *DD) {
12249 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
12250}
12251
12252TypedefNameDecl *
12253ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
12254 return ABI->getTypedefNameForUnnamedTagDecl(TD);
12255}
12256
12257void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
12258 DeclaratorDecl *DD) {
12259 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
12260}
12261
12262DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
12263 return ABI->getDeclaratorForUnnamedTagDecl(TD);
12264}
12265
12266void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
12267 ParamIndices[D] = index;
12268}
12269
12270unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
12271 ParameterIndexTable::const_iterator I = ParamIndices.find(D);
12272 assert(I != ParamIndices.end() &&
12273 "ParmIndices lacks entry set by ParmVarDecl");
12274 return I->second;
12275}
12276
12277QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
12278 unsigned Length) const {
12279 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
12280 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
12281 EltTy = EltTy.withConst();
12282
  EltTy = adjustStringLiteralBaseType(EltTy);
12284
12285 // Get an array type for the string, according to C99 6.4.5. This includes
12286 // the null terminator character.
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
12289}
12290
12291StringLiteral *
12292ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
12293 StringLiteral *&Result = StringLiteralCache[Key];
12294 if (!Result)
12295 Result = StringLiteral::Create(
12296 *this, Key, StringLiteralKind::Ordinary,
12297 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
12298 SourceLocation());
12299 return Result;
12300}
12301
12302MSGuidDecl *
12303ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
12304 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
12305
12306 llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);
12308
12309 void *InsertPos;
12310 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
12311 return Existing;
12312
12313 QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
12315 MSGuidDecls.InsertNode(New, InsertPos);
12316 return New;
12317}
12318
12319UnnamedGlobalConstantDecl *
12320ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
12321 const APValue &APVal) const {
12322 llvm::FoldingSetNodeID ID;
12323 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
12324
12325 void *InsertPos;
12326 if (UnnamedGlobalConstantDecl *Existing =
12327 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
12328 return Existing;
12329
12330 UnnamedGlobalConstantDecl *New =
      UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
12332 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
12333 return New;
12334}
12335
12336TemplateParamObjectDecl *
12337ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
12338 assert(T->isRecordType() && "template param object of unexpected type");
12339
12340 // C++ [temp.param]p8:
12341 // [...] a static storage duration object of type 'const T' [...]
12342 T.addConst();
12343
12344 llvm::FoldingSetNodeID ID;
12345 TemplateParamObjectDecl::Profile(ID, T, V);
12346
12347 void *InsertPos;
12348 if (TemplateParamObjectDecl *Existing =
12349 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
12350 return Existing;
12351
  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
12353 TemplateParamObjectDecls.InsertNode(New, InsertPos);
12354 return New;
12355}
12356
12357bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
12358 const llvm::Triple &T = getTargetInfo().getTriple();
12359 if (!T.isOSDarwin())
12360 return false;
12361
  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
12373}
12374
12375bool
12376ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
12377 const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
12379 if (MethodDecl->hasAttr<UnavailableAttr>()
12380 || MethodDecl->hasAttr<DeprecatedAttr>())
12381 return false;
12382 if (MethodDecl->getObjCDeclQualifier() !=
12383 MethodImpl->getObjCDeclQualifier())
12384 return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
12386 return false;
12387
12388 if (MethodDecl->param_size() != MethodImpl->param_size())
12389 return false;
12390
12391 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
12392 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
12393 EF = MethodDecl->param_end();
12394 IM != EM && IF != EF; ++IM, ++IF) {
12395 const ParmVarDecl *DeclVar = (*IF);
12396 const ParmVarDecl *ImplVar = (*IM);
12397 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
12398 return false;
12399 if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
12400 return false;
12401 }
12402
12403 return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
12404}
12405
12406uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
12407 LangAS AS;
12408 if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
12409 AS = LangAS::Default;
12410 else
12411 AS = QT->getPointeeType().getAddressSpace();
12412
  return getTargetInfo().getNullPointerValue(AS);
12414}
12415
12416unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
12417 return getTargetInfo().getTargetAddressSpace(AS);
12418}
12419
12420bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const {
12421 if (X == Y)
12422 return true;
12423 if (!X || !Y)
12424 return false;
12425 llvm::FoldingSetNodeID IDX, IDY;
12426 X->Profile(IDX, *this, /*Canonical=*/true);
12427 Y->Profile(IDY, *this, /*Canonical=*/true);
12428 return IDX == IDY;
12429}
12430
// The getCommon* helpers take two entities X and Y that are the 'same', and
// return another entity that is also the 'same' as both inputs but is closer
// to their canonical form, each according to a given criterion.
// The getCommon*Checked variants are the 'null inputs not allowed' equivalents
// of the regular ones.
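// For example, given X = 'const IntAlias' and Y = 'const int', where IntAlias
// is a hypothetical typedef for int, getCommonSugaredType returns 'const int':
// it is the 'same' type as both inputs but closer to the canonical form.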
12437
12438static Decl *getCommonDecl(Decl *X, Decl *Y) {
  if (!declaresSameEntity(X, Y))
12440 return nullptr;
12441 for (const Decl *DX : X->redecls()) {
12442 // If we reach Y before reaching the first decl, that means X is older.
12443 if (DX == Y)
12444 return X;
12445 // If we reach the first decl, then Y is older.
12446 if (DX->isFirstDecl())
12447 return Y;
12448 }
12449 llvm_unreachable("Corrupt redecls chain");
12450}
12451
12452template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
12453static T *getCommonDecl(T *X, T *Y) {
12454 return cast_or_null<T>(
      getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)),
                    const_cast<Decl *>(cast_or_null<Decl>(Y))));
12457}
12458
12459template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true>
12460static T *getCommonDeclChecked(T *X, T *Y) {
  return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)),
                               const_cast<Decl *>(cast<Decl>(Y))));
12463}
12464
12465static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X,
12466 TemplateName Y) {
12467 if (X.getAsVoidPointer() == Y.getAsVoidPointer())
12468 return X;
12469 // FIXME: There are cases here where we could find a common template name
12470 // with more sugar. For example one could be a SubstTemplateTemplate*
12471 // replacing the other.
  TemplateName CX = Ctx.getCanonicalTemplateName(X);
  if (CX.getAsVoidPointer() !=
      Ctx.getCanonicalTemplateName(Y).getAsVoidPointer())
12475 return TemplateName();
12476 return CX;
12477}
12478
12479static TemplateName
12480getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) {
12481 TemplateName R = getCommonTemplateName(Ctx, X, Y);
12482 assert(R.getAsVoidPointer() != nullptr);
12483 return R;
12484}
12485
12486static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs,
12487 ArrayRef<QualType> Ys, bool Unqualified = false) {
12488 assert(Xs.size() == Ys.size());
12489 SmallVector<QualType, 8> Rs(Xs.size());
12490 for (size_t I = 0; I < Rs.size(); ++I)
    Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified);
12492 return Rs;
12493}
12494
12495template <class T>
12496static SourceLocation getCommonAttrLoc(const T *X, const T *Y) {
12497 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc()
12498 : SourceLocation();
12499}
12500
12501static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx,
12502 const TemplateArgument &X,
12503 const TemplateArgument &Y) {
12504 if (X.getKind() != Y.getKind())
12505 return TemplateArgument();
12506
12507 switch (X.getKind()) {
12508 case TemplateArgument::ArgKind::Type:
    if (!Ctx.hasSameType(X.getAsType(), Y.getAsType()))
      return TemplateArgument();
    return TemplateArgument(
        Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType()));
  case TemplateArgument::ArgKind::NullPtr:
    if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
      return TemplateArgument();
    return TemplateArgument(
        Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()),
12518 /*Unqualified=*/true);
12519 case TemplateArgument::ArgKind::Expression:
    if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType()))
12521 return TemplateArgument();
12522 // FIXME: Try to keep the common sugar.
12523 return X;
12524 case TemplateArgument::ArgKind::Template: {
12525 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate();
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
12527 if (!CTN.getAsVoidPointer())
12528 return TemplateArgument();
12529 return TemplateArgument(CTN);
12530 }
12531 case TemplateArgument::ArgKind::TemplateExpansion: {
12532 TemplateName TX = X.getAsTemplateOrTemplatePattern(),
12533 TY = Y.getAsTemplateOrTemplatePattern();
    TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY);
12535 if (!CTN.getAsVoidPointer())
12536 return TemplateName();
12537 auto NExpX = X.getNumTemplateExpansions();
12538 assert(NExpX == Y.getNumTemplateExpansions());
12539 return TemplateArgument(CTN, NExpX);
12540 }
12541 default:
12542 // FIXME: Handle the other argument kinds.
12543 return X;
12544 }
12545}
12546
12547static bool getCommonTemplateArguments(ASTContext &Ctx,
12548 SmallVectorImpl<TemplateArgument> &R,
12549 ArrayRef<TemplateArgument> Xs,
12550 ArrayRef<TemplateArgument> Ys) {
12551 if (Xs.size() != Ys.size())
12552 return true;
  R.resize(Xs.size());
  for (size_t I = 0; I < R.size(); ++I) {
    R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]);
12556 if (R[I].isNull())
12557 return true;
12558 }
12559 return false;
12560}
12561
12562static auto getCommonTemplateArguments(ASTContext &Ctx,
12563 ArrayRef<TemplateArgument> Xs,
12564 ArrayRef<TemplateArgument> Ys) {
12565 SmallVector<TemplateArgument, 8> R;
12566 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys);
12567 assert(!Different);
12568 (void)Different;
12569 return R;
12570}
12571
12572template <class T>
12573static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) {
12574 return X->getKeyword() == Y->getKeyword() ? X->getKeyword()
12575 : ElaboratedTypeKeyword::None;
12576}
12577
12578template <class T>
12579static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X,
12580 const T *Y) {
12581 // FIXME: Try to keep the common NNS sugar.
12582 return X->getQualifier() == Y->getQualifier()
12583 ? X->getQualifier()
             : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier());
12585}
12586
12587template <class T>
12588static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType());
12590}
12591
12592template <class T>
12593static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X,
12594 Qualifiers &QX, const T *Y,
12595 Qualifiers &QY) {
12596 QualType EX = X->getElementType(), EY = Y->getElementType();
  QualType R = Ctx.getCommonSugaredType(EX, EY,
12598 /*Unqualified=*/true);
12599 Qualifiers RQ = R.getQualifiers();
12600 QX += EX.getQualifiers() - RQ;
12601 QY += EY.getQualifiers() - RQ;
12602 return R;
12603}
12604
12605template <class T>
12606static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) {
  return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType());
12608}
12609
12610template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) {
12611 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr()));
12612 return X->getSizeExpr();
12613}
12614
12615static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) {
12616 assert(X->getSizeModifier() == Y->getSizeModifier());
12617 return X->getSizeModifier();
12618}
12619
12620static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X,
12621 const ArrayType *Y) {
12622 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers());
12623 return X->getIndexTypeCVRQualifiers();
12624}
12625
12626// Merges two type lists such that the resulting vector will contain
12627// each type (in a canonical sense) only once, in the order they appear
12628// from X to Y. If they occur in both X and Y, the result will contain
12629// the common sugared type between them.
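// For example (with hypothetical types), merging {int, Foo} with
// {FooAlias, float}, where FooAlias is a typedef for Foo, yields
// {int, Foo, float}, with the middle entry being the common sugared type of
// Foo and FooAlias.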
12630static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out,
12631 ArrayRef<QualType> X, ArrayRef<QualType> Y) {
12632 llvm::DenseMap<QualType, unsigned> Found;
12633 for (auto Ts : {X, Y}) {
12634 for (QualType T : Ts) {
12635 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size());
12636 if (!Res.second) {
12637 QualType &U = Out[Res.first->second];
        U = Ctx.getCommonSugaredType(U, T);
      } else {
        Out.emplace_back(T);
12641 }
12642 }
12643 }
12644}
12645
12646FunctionProtoType::ExceptionSpecInfo
12647ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1,
12648 FunctionProtoType::ExceptionSpecInfo ESI2,
12649 SmallVectorImpl<QualType> &ExceptionTypeStorage,
12650 bool AcceptDependent) {
12651 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type;
12652
12653 // If either of them can throw anything, that is the result.
12654 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) {
12655 if (EST1 == I)
12656 return ESI1;
12657 if (EST2 == I)
12658 return ESI2;
12659 }
12660
12661 // If either of them is non-throwing, the result is the other.
12662 for (auto I :
12663 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) {
12664 if (EST1 == I)
12665 return ESI2;
12666 if (EST2 == I)
12667 return ESI1;
12668 }
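  // For example, merging 'noexcept' with 'throw(int)' yields 'throw(int)',
  // and merging 'throw(int)' with 'throw(float)' yields 'throw(int, float)'
  // via the dynamic-specification union handled below.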
12669
12670 // If we're left with value-dependent computed noexcept expressions, we're
12671 // stuck. Before C++17, we can just drop the exception specification entirely,
12672 // since it's not actually part of the canonical type. And this should never
12673 // happen in C++17, because it would mean we were computing the composite
12674 // pointer type of dependent types, which should never happen.
12675 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) {
12676 assert(AcceptDependent &&
12677 "computing composite pointer type of dependent types");
12678 return FunctionProtoType::ExceptionSpecInfo();
12679 }
12680
12681 // Switch over the possibilities so that people adding new values know to
12682 // update this function.
12683 switch (EST1) {
12684 case EST_None:
12685 case EST_DynamicNone:
12686 case EST_MSAny:
12687 case EST_BasicNoexcept:
12688 case EST_DependentNoexcept:
12689 case EST_NoexceptFalse:
12690 case EST_NoexceptTrue:
12691 case EST_NoThrow:
12692 llvm_unreachable("These ESTs should be handled above");
12693
12694 case EST_Dynamic: {
12695 // This is the fun case: both exception specifications are dynamic. Form
12696 // the union of the two lists.
12697 assert(EST2 == EST_Dynamic && "other cases should already be handled");
12698 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions,
12699 ESI2.Exceptions);
12700 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
12701 Result.Exceptions = ExceptionTypeStorage;
12702 return Result;
12703 }
12704
12705 case EST_Unevaluated:
12706 case EST_Uninstantiated:
12707 case EST_Unparsed:
12708 llvm_unreachable("shouldn't see unresolved exception specifications here");
12709 }
12710
12711 llvm_unreachable("invalid ExceptionSpecificationType");
12712}
12713
12714static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X,
12715 Qualifiers &QX, const Type *Y,
12716 Qualifiers &QY) {
12717 Type::TypeClass TC = X->getTypeClass();
12718 assert(TC == Y->getTypeClass());
12719 switch (TC) {
12720#define UNEXPECTED_TYPE(Class, Kind) \
12721 case Type::Class: \
12722 llvm_unreachable("Unexpected " Kind ": " #Class);
12723
12724#define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical")
12725#define TYPE(Class, Base)
12726#include "clang/AST/TypeNodes.inc"
12727
12728#define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free")
12729 SUGAR_FREE_TYPE(Builtin)
12730 SUGAR_FREE_TYPE(DeducedTemplateSpecialization)
12731 SUGAR_FREE_TYPE(DependentBitInt)
12732 SUGAR_FREE_TYPE(Enum)
12733 SUGAR_FREE_TYPE(BitInt)
12734 SUGAR_FREE_TYPE(ObjCInterface)
12735 SUGAR_FREE_TYPE(Record)
12736 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack)
12737 SUGAR_FREE_TYPE(UnresolvedUsing)
12738#undef SUGAR_FREE_TYPE
12739#define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique")
12740 NON_UNIQUE_TYPE(TypeOfExpr)
12741 NON_UNIQUE_TYPE(VariableArray)
12742#undef NON_UNIQUE_TYPE
12743
12744 UNEXPECTED_TYPE(TypeOf, "sugar")
12745
12746#undef UNEXPECTED_TYPE
12747
12748 case Type::Auto: {
12749 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
12750 assert(AX->getDeducedType().isNull());
12751 assert(AY->getDeducedType().isNull());
12752 assert(AX->getKeyword() == AY->getKeyword());
12753 assert(AX->isInstantiationDependentType() ==
12754 AY->isInstantiationDependentType());
12755 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(),
12756 AY->getTypeConstraintArguments());
12757 return Ctx.getAutoType(DeducedType: QualType(), Keyword: AX->getKeyword(),
12758 IsDependent: AX->isInstantiationDependentType(),
12759 IsPack: AX->containsUnexpandedParameterPack(),
12760 TypeConstraintConcept: getCommonDeclChecked(AX->getTypeConstraintConcept(),
12761 AY->getTypeConstraintConcept()),
12762 TypeConstraintArgs: As);
12763 }
12764 case Type::IncompleteArray: {
12765 const auto *AX = cast<IncompleteArrayType>(X),
12766 *AY = cast<IncompleteArrayType>(Y);
12767 return Ctx.getIncompleteArrayType(
12768 elementType: getCommonArrayElementType(Ctx, AX, QX, AY, QY),
12769 ASM: getCommonSizeModifier(AX, AY), elementTypeQuals: getCommonIndexTypeCVRQualifiers(AX, AY));
12770 }
12771 case Type::DependentSizedArray: {
12772 const auto *AX = cast<DependentSizedArrayType>(X),
12773 *AY = cast<DependentSizedArrayType>(Y);
12774 return Ctx.getDependentSizedArrayType(
12775 elementType: getCommonArrayElementType(Ctx, AX, QX, AY, QY),
12776 numElements: getCommonSizeExpr(Ctx, AX, AY), ASM: getCommonSizeModifier(AX, AY),
12777 elementTypeQuals: getCommonIndexTypeCVRQualifiers(AX, AY),
12778 brackets: AX->getBracketsRange() == AY->getBracketsRange()
12779 ? AX->getBracketsRange()
12780 : SourceRange());
12781 }
12782 case Type::ConstantArray: {
12783 const auto *AX = cast<ConstantArrayType>(X),
12784 *AY = cast<ConstantArrayType>(Y);
12785 assert(AX->getSize() == AY->getSize());
12786 const Expr *SizeExpr = Ctx.hasSameExpr(X: AX->getSizeExpr(), Y: AY->getSizeExpr())
12787 ? AX->getSizeExpr()
12788 : nullptr;
12789 return Ctx.getConstantArrayType(
12790 EltTy: getCommonArrayElementType(Ctx, AX, QX, AY, QY), ArySizeIn: AX->getSize(), SizeExpr,
12791 ASM: getCommonSizeModifier(AX, AY), IndexTypeQuals: getCommonIndexTypeCVRQualifiers(AX, AY));
12792 }
12793 case Type::Atomic: {
12794 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y);
12795 return Ctx.getAtomicType(
12796 T: Ctx.getCommonSugaredType(X: AX->getValueType(), Y: AY->getValueType()));
12797 }
12798 case Type::Complex: {
12799 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y);
12800 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY));
12801 }
12802 case Type::Pointer: {
12803 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y);
12804 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY));
12805 }
12806 case Type::BlockPointer: {
12807 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y);
12808 return Ctx.getBlockPointerType(T: getCommonPointeeType(Ctx, PX, PY));
12809 }
12810 case Type::ObjCObjectPointer: {
12811 const auto *PX = cast<ObjCObjectPointerType>(X),
12812 *PY = cast<ObjCObjectPointerType>(Y);
12813 return Ctx.getObjCObjectPointerType(ObjectT: getCommonPointeeType(Ctx, PX, PY));
12814 }
12815 case Type::MemberPointer: {
12816 const auto *PX = cast<MemberPointerType>(X),
12817 *PY = cast<MemberPointerType>(Y);
12818 return Ctx.getMemberPointerType(
12819 T: getCommonPointeeType(Ctx, PX, PY),
12820 Cls: Ctx.getCommonSugaredType(X: QualType(PX->getClass(), 0),
12821 Y: QualType(PY->getClass(), 0))
12822 .getTypePtr());
12823 }
12824 case Type::LValueReference: {
12825 const auto *PX = cast<LValueReferenceType>(X),
12826 *PY = cast<LValueReferenceType>(Y);
12827 // FIXME: Preserve PointeeTypeAsWritten.
12828 return Ctx.getLValueReferenceType(T: getCommonPointeeType(Ctx, PX, PY),
12829 SpelledAsLValue: PX->isSpelledAsLValue() ||
12830 PY->isSpelledAsLValue());
12831 }
12832 case Type::RValueReference: {
12833 const auto *PX = cast<RValueReferenceType>(X),
12834 *PY = cast<RValueReferenceType>(Y);
12835 // FIXME: Preserve PointeeTypeAsWritten.
12836 return Ctx.getRValueReferenceType(T: getCommonPointeeType(Ctx, PX, PY));
12837 }
12838 case Type::DependentAddressSpace: {
12839 const auto *PX = cast<DependentAddressSpaceType>(X),
12840 *PY = cast<DependentAddressSpaceType>(Y);
12841 assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr()));
12842 return Ctx.getDependentAddressSpaceType(PointeeType: getCommonPointeeType(Ctx, PX, PY),
12843 AddrSpaceExpr: PX->getAddrSpaceExpr(),
12844 AttrLoc: getCommonAttrLoc(PX, PY));
12845 }
12846 case Type::FunctionNoProto: {
12847 const auto *FX = cast<FunctionNoProtoType>(X),
12848 *FY = cast<FunctionNoProtoType>(Y);
12849 assert(FX->getExtInfo() == FY->getExtInfo());
12850 return Ctx.getFunctionNoProtoType(
12851 Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType()),
12852 FX->getExtInfo());
12853 }
12854 case Type::FunctionProto: {
12855 const auto *FX = cast<FunctionProtoType>(X),
12856 *FY = cast<FunctionProtoType>(Y);
12857 FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(),
12858 EPIY = FY->getExtProtoInfo();
12859 assert(EPIX.ExtInfo == EPIY.ExtInfo);
12860 assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos);
12861 assert(EPIX.RefQualifier == EPIY.RefQualifier);
12862 assert(EPIX.TypeQuals == EPIY.TypeQuals);
12863 assert(EPIX.Variadic == EPIY.Variadic);
12864
12865 // FIXME: Can we handle an empty EllipsisLoc?
    // Use an empty EllipsisLoc if X and Y differ.
12867
12868 EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn;
12869
12870 QualType R =
12871 Ctx.getCommonSugaredType(X: FX->getReturnType(), Y: FY->getReturnType());
12872 auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(),
12873 /*Unqualified=*/true);
12874
12875 SmallVector<QualType, 8> Exceptions;
12876 EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs(
12877 ESI1: EPIX.ExceptionSpec, ESI2: EPIY.ExceptionSpec, ExceptionTypeStorage&: Exceptions, AcceptDependent: true);
12878 return Ctx.getFunctionType(ResultTy: R, Args: P, EPI: EPIX);
12879 }
12880 case Type::ObjCObject: {
12881 const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y);
12882 assert(
12883 std::equal(OX->getProtocols().begin(), OX->getProtocols().end(),
12884 OY->getProtocols().begin(), OY->getProtocols().end(),
12885 [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) {
12886 return P0->getCanonicalDecl() == P1->getCanonicalDecl();
12887 }) &&
12888 "protocol lists must be the same");
12889 auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(),
12890 OY->getTypeArgsAsWritten());
12891 return Ctx.getObjCObjectType(
12892 Ctx.getCommonSugaredType(X: OX->getBaseType(), Y: OY->getBaseType()), TAs,
12893 OX->getProtocols(),
12894 OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten());
12895 }
12896 case Type::ConstantMatrix: {
12897 const auto *MX = cast<ConstantMatrixType>(X),
12898 *MY = cast<ConstantMatrixType>(Y);
12899 assert(MX->getNumRows() == MY->getNumRows());
12900 assert(MX->getNumColumns() == MY->getNumColumns());
12901 return Ctx.getConstantMatrixType(ElementTy: getCommonElementType(Ctx, MX, MY),
12902 NumRows: MX->getNumRows(), NumColumns: MX->getNumColumns());
12903 }
12904 case Type::DependentSizedMatrix: {
12905 const auto *MX = cast<DependentSizedMatrixType>(X),
12906 *MY = cast<DependentSizedMatrixType>(Y);
12907 assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr()));
12908 assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr()));
12909 return Ctx.getDependentSizedMatrixType(
12910 ElementTy: getCommonElementType(Ctx, MX, MY), RowExpr: MX->getRowExpr(),
12911 ColumnExpr: MX->getColumnExpr(), AttrLoc: getCommonAttrLoc(MX, MY));
12912 }
12913 case Type::Vector: {
12914 const auto *VX = cast<VectorType>(X), *VY = cast<VectorType>(Y);
12915 assert(VX->getNumElements() == VY->getNumElements());
12916 assert(VX->getVectorKind() == VY->getVectorKind());
12917 return Ctx.getVectorType(vecType: getCommonElementType(Ctx, VX, VY),
12918 NumElts: VX->getNumElements(), VecKind: VX->getVectorKind());
12919 }
12920 case Type::ExtVector: {
12921 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y);
12922 assert(VX->getNumElements() == VY->getNumElements());
12923 return Ctx.getExtVectorType(vecType: getCommonElementType(Ctx, VX, VY),
12924 NumElts: VX->getNumElements());
12925 }
12926 case Type::DependentSizedExtVector: {
12927 const auto *VX = cast<DependentSizedExtVectorType>(X),
12928 *VY = cast<DependentSizedExtVectorType>(Y);
12929 return Ctx.getDependentSizedExtVectorType(vecType: getCommonElementType(Ctx, VX, VY),
12930 SizeExpr: getCommonSizeExpr(Ctx, VX, VY),
12931 AttrLoc: getCommonAttrLoc(VX, VY));
12932 }
12933 case Type::DependentVector: {
12934 const auto *VX = cast<DependentVectorType>(X),
12935 *VY = cast<DependentVectorType>(Y);
12936 assert(VX->getVectorKind() == VY->getVectorKind());
12937 return Ctx.getDependentVectorType(
12938 VecType: getCommonElementType(Ctx, VX, VY), SizeExpr: getCommonSizeExpr(Ctx, VX, VY),
12939 AttrLoc: getCommonAttrLoc(VX, VY), VecKind: VX->getVectorKind());
12940 }
12941 case Type::InjectedClassName: {
12942 const auto *IX = cast<InjectedClassNameType>(X),
12943 *IY = cast<InjectedClassNameType>(Y);
12944 return Ctx.getInjectedClassNameType(
12945 Decl: getCommonDeclChecked(IX->getDecl(), IY->getDecl()),
12946 TST: Ctx.getCommonSugaredType(X: IX->getInjectedSpecializationType(),
12947 Y: IY->getInjectedSpecializationType()));
12948 }
12949 case Type::TemplateSpecialization: {
12950 const auto *TX = cast<TemplateSpecializationType>(X),
12951 *TY = cast<TemplateSpecializationType>(Y);
12952 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
12953 TY->template_arguments());
12954 return Ctx.getTemplateSpecializationType(
12955 ::getCommonTemplateNameChecked(Ctx, X: TX->getTemplateName(),
12956 Y: TY->getTemplateName()),
12957 As, X->getCanonicalTypeInternal());
12958 }
12959 case Type::Decltype: {
12960 const auto *DX = cast<DecltypeType>(X);
12961 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y);
12962 assert(DX->isDependentType());
12963 assert(DY->isDependentType());
12964 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr()));
12965 // As Decltype is not uniqued, building a common type would be wasteful.
12966 return QualType(DX, 0);
12967 }
12968 case Type::PackIndexing: {
12969 const auto *DX = cast<PackIndexingType>(X);
12970 [[maybe_unused]] const auto *DY = cast<PackIndexingType>(Y);
12971 assert(DX->isDependentType());
12972 assert(DY->isDependentType());
12973 assert(Ctx.hasSameExpr(DX->getIndexExpr(), DY->getIndexExpr()));
12974 return QualType(DX, 0);
12975 }
12976 case Type::DependentName: {
12977 const auto *NX = cast<DependentNameType>(X),
12978 *NY = cast<DependentNameType>(Y);
12979 assert(NX->getIdentifier() == NY->getIdentifier());
12980 return Ctx.getDependentNameType(
12981 Keyword: getCommonTypeKeyword(NX, NY), NNS: getCommonNNS(Ctx, NX, NY),
12982 Name: NX->getIdentifier(), Canon: NX->getCanonicalTypeInternal());
12983 }
12984 case Type::DependentTemplateSpecialization: {
12985 const auto *TX = cast<DependentTemplateSpecializationType>(X),
12986 *TY = cast<DependentTemplateSpecializationType>(Y);
12987 assert(TX->getIdentifier() == TY->getIdentifier());
12988 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(),
12989 TY->template_arguments());
12990 return Ctx.getDependentTemplateSpecializationType(
12991 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY),
12992 TX->getIdentifier(), As);
12993 }
12994 case Type::UnaryTransform: {
12995 const auto *TX = cast<UnaryTransformType>(X),
12996 *TY = cast<UnaryTransformType>(Y);
12997 assert(TX->getUTTKind() == TY->getUTTKind());
12998 return Ctx.getUnaryTransformType(
12999 BaseType: Ctx.getCommonSugaredType(X: TX->getBaseType(), Y: TY->getBaseType()),
13000 UnderlyingType: Ctx.getCommonSugaredType(X: TX->getUnderlyingType(),
13001 Y: TY->getUnderlyingType()),
13002 Kind: TX->getUTTKind());
13003 }
13004 case Type::PackExpansion: {
13005 const auto *PX = cast<PackExpansionType>(X),
13006 *PY = cast<PackExpansionType>(Y);
13007 assert(PX->getNumExpansions() == PY->getNumExpansions());
13008 return Ctx.getPackExpansionType(
13009 Pattern: Ctx.getCommonSugaredType(X: PX->getPattern(), Y: PY->getPattern()),
13010 NumExpansions: PX->getNumExpansions(), ExpectPackInType: false);
13011 }
13012 case Type::Pipe: {
13013 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y);
13014 assert(PX->isReadOnly() == PY->isReadOnly());
13015 auto MP = PX->isReadOnly() ? &ASTContext::getReadPipeType
13016 : &ASTContext::getWritePipeType;
13017 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY));
13018 }
13019 case Type::TemplateTypeParm: {
13020 const auto *TX = cast<TemplateTypeParmType>(X),
13021 *TY = cast<TemplateTypeParmType>(Y);
13022 assert(TX->getDepth() == TY->getDepth());
13023 assert(TX->getIndex() == TY->getIndex());
13024 assert(TX->isParameterPack() == TY->isParameterPack());
13025 return Ctx.getTemplateTypeParmType(
13026 Depth: TX->getDepth(), Index: TX->getIndex(), ParameterPack: TX->isParameterPack(),
13027 TTPDecl: getCommonDecl(TX->getDecl(), TY->getDecl()));
13028 }
13029 }
13030 llvm_unreachable("Unknown Type Class");
13031}
13032
13033static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X,
13034 const Type *Y,
13035 SplitQualType Underlying) {
13036 Type::TypeClass TC = X->getTypeClass();
13037 if (TC != Y->getTypeClass())
13038 return QualType();
13039 switch (TC) {
13040#define UNEXPECTED_TYPE(Class, Kind) \
13041 case Type::Class: \
13042 llvm_unreachable("Unexpected " Kind ": " #Class);
13043#define TYPE(Class, Base)
13044#define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent")
13045#include "clang/AST/TypeNodes.inc"
13046
13047#define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical")
13048 CANONICAL_TYPE(Atomic)
13049 CANONICAL_TYPE(BitInt)
13050 CANONICAL_TYPE(BlockPointer)
13051 CANONICAL_TYPE(Builtin)
13052 CANONICAL_TYPE(Complex)
13053 CANONICAL_TYPE(ConstantArray)
13054 CANONICAL_TYPE(ConstantMatrix)
13055 CANONICAL_TYPE(Enum)
13056 CANONICAL_TYPE(ExtVector)
13057 CANONICAL_TYPE(FunctionNoProto)
13058 CANONICAL_TYPE(FunctionProto)
13059 CANONICAL_TYPE(IncompleteArray)
13060 CANONICAL_TYPE(LValueReference)
13061 CANONICAL_TYPE(MemberPointer)
13062 CANONICAL_TYPE(ObjCInterface)
13063 CANONICAL_TYPE(ObjCObject)
13064 CANONICAL_TYPE(ObjCObjectPointer)
13065 CANONICAL_TYPE(Pipe)
13066 CANONICAL_TYPE(Pointer)
13067 CANONICAL_TYPE(Record)
13068 CANONICAL_TYPE(RValueReference)
13069 CANONICAL_TYPE(VariableArray)
13070 CANONICAL_TYPE(Vector)
13071#undef CANONICAL_TYPE
13072
13073#undef UNEXPECTED_TYPE
13074
13075 case Type::Adjusted: {
13076 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y);
13077 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType();
13078 if (!Ctx.hasSameType(T1: OX, T2: OY))
13079 return QualType();
13080 // FIXME: It's inefficient to have to unify the original types.
13081 return Ctx.getAdjustedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
13082 New: Ctx.getQualifiedType(split: Underlying));
13083 }
13084 case Type::Decayed: {
13085 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y);
13086 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType();
13087 if (!Ctx.hasSameType(T1: OX, T2: OY))
13088 return QualType();
13089 // FIXME: It's inefficient to have to unify the original types.
13090 return Ctx.getDecayedType(Orig: Ctx.getCommonSugaredType(X: OX, Y: OY),
13091 Decayed: Ctx.getQualifiedType(split: Underlying));
13092 }
13093 case Type::Attributed: {
13094 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y);
13095 AttributedType::Kind Kind = AX->getAttrKind();
13096 if (Kind != AY->getAttrKind())
13097 return QualType();
13098 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType();
13099 if (!Ctx.hasSameType(T1: MX, T2: MY))
13100 return QualType();
13101 // FIXME: It's inefficient to have to unify the modified types.
13102 return Ctx.getAttributedType(attrKind: Kind, modifiedType: Ctx.getCommonSugaredType(X: MX, Y: MY),
13103 equivalentType: Ctx.getQualifiedType(split: Underlying));
13104 }
13105 case Type::BTFTagAttributed: {
13106 const auto *BX = cast<BTFTagAttributedType>(X);
13107 const BTFTypeTagAttr *AX = BX->getAttr();
13108 // The attribute is not uniqued, so just compare the tag.
13109 if (AX->getBTFTypeTag() !=
13110 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag())
13111 return QualType();
13112 return Ctx.getBTFTagAttributedType(BTFAttr: AX, Wrapped: Ctx.getQualifiedType(split: Underlying));
13113 }
13114 case Type::Auto: {
13115 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y);
13116
13117 AutoTypeKeyword KW = AX->getKeyword();
13118 if (KW != AY->getKeyword())
13119 return QualType();
13120
13121 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(),
13122 AY->getTypeConstraintConcept());
13123 SmallVector<TemplateArgument, 8> As;
13124 if (CD &&
13125 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(),
13126 AY->getTypeConstraintArguments())) {
13127 CD = nullptr; // The arguments differ, so make it unconstrained.
13128 As.clear();
13129 }
13130
13131 // Both auto types can't be dependent, otherwise they wouldn't have been
13132 // sugar. This implies they can't contain unexpanded packs either.
13133 return Ctx.getAutoType(DeducedType: Ctx.getQualifiedType(split: Underlying), Keyword: AX->getKeyword(),
13134 /*IsDependent=*/false, /*IsPack=*/false, TypeConstraintConcept: CD, TypeConstraintArgs: As);
13135 }
13136 case Type::PackIndexing:
13137 case Type::Decltype:
13138 return QualType();
13139 case Type::DeducedTemplateSpecialization:
13140 // FIXME: Try to merge these.
13141 return QualType();
13142
13143 case Type::Elaborated: {
13144 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y);
13145 return Ctx.getElaboratedType(
13146 Keyword: ::getCommonTypeKeyword(EX, EY), NNS: ::getCommonNNS(Ctx, EX, EY),
13147 NamedType: Ctx.getQualifiedType(split: Underlying),
13148 OwnedTagDecl: ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl()));
13149 }
13150 case Type::MacroQualified: {
13151 const auto *MX = cast<MacroQualifiedType>(X),
13152 *MY = cast<MacroQualifiedType>(Y);
13153 const IdentifierInfo *IX = MX->getMacroIdentifier();
13154 if (IX != MY->getMacroIdentifier())
13155 return QualType();
13156 return Ctx.getMacroQualifiedType(UnderlyingTy: Ctx.getQualifiedType(split: Underlying), MacroII: IX);
13157 }
13158 case Type::SubstTemplateTypeParm: {
13159 const auto *SX = cast<SubstTemplateTypeParmType>(X),
13160 *SY = cast<SubstTemplateTypeParmType>(Y);
13161 Decl *CD =
13162 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl());
13163 if (!CD)
13164 return QualType();
13165 unsigned Index = SX->getIndex();
13166 if (Index != SY->getIndex())
13167 return QualType();
13168 auto PackIndex = SX->getPackIndex();
13169 if (PackIndex != SY->getPackIndex())
13170 return QualType();
13171 return Ctx.getSubstTemplateTypeParmType(Replacement: Ctx.getQualifiedType(split: Underlying),
13172 AssociatedDecl: CD, Index, PackIndex: PackIndex);
13173 }
13174 case Type::ObjCTypeParam:
13175 // FIXME: Try to merge these.
13176 return QualType();
13177 case Type::Paren:
13178 return Ctx.getParenType(InnerType: Ctx.getQualifiedType(split: Underlying));
13179
13180 case Type::TemplateSpecialization: {
13181 const auto *TX = cast<TemplateSpecializationType>(X),
13182 *TY = cast<TemplateSpecializationType>(Y);
13183 TemplateName CTN = ::getCommonTemplateName(Ctx, X: TX->getTemplateName(),
13184 Y: TY->getTemplateName());
13185 if (!CTN.getAsVoidPointer())
13186 return QualType();
13187 SmallVector<TemplateArgument, 8> Args;
13188 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(),
13189 TY->template_arguments()))
13190 return QualType();
13191 return Ctx.getTemplateSpecializationType(CTN, Args,
13192 Ctx.getQualifiedType(split: Underlying));
13193 }
13194 case Type::Typedef: {
13195 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y);
13196 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl());
13197 if (!CD)
13198 return QualType();
13199 return Ctx.getTypedefType(Decl: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
13200 }
13201 case Type::TypeOf: {
    // When computing the common sugar of two typeof expressions where one may
    // be a typeof_unqual and the other is not, unify to the qualified form,
    // since it retains the most information. A typeof_unqual type is returned
    // only when both inputs are typeof_unqual.
13206 TypeOfKind Kind = TypeOfKind::Qualified;
13207 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() &&
13208 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified)
13209 Kind = TypeOfKind::Unqualified;
13210 return Ctx.getTypeOfType(tofType: Ctx.getQualifiedType(split: Underlying), Kind);
13211 }
13212 case Type::TypeOfExpr:
13213 return QualType();
13214
13215 case Type::UnaryTransform: {
13216 const auto *UX = cast<UnaryTransformType>(X),
13217 *UY = cast<UnaryTransformType>(Y);
13218 UnaryTransformType::UTTKind KX = UX->getUTTKind();
13219 if (KX != UY->getUTTKind())
13220 return QualType();
13221 QualType BX = UX->getBaseType(), BY = UY->getBaseType();
13222 if (!Ctx.hasSameType(T1: BX, T2: BY))
13223 return QualType();
13224 // FIXME: It's inefficient to have to unify the base types.
13225 return Ctx.getUnaryTransformType(BaseType: Ctx.getCommonSugaredType(X: BX, Y: BY),
13226 UnderlyingType: Ctx.getQualifiedType(split: Underlying), Kind: KX);
13227 }
13228 case Type::Using: {
13229 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y);
13230 const UsingShadowDecl *CD =
13231 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl());
13232 if (!CD)
13233 return QualType();
13234 return Ctx.getUsingType(Found: CD, Underlying: Ctx.getQualifiedType(split: Underlying));
13235 }
13236 }
13237 llvm_unreachable("Unhandled Type Class");
13238}
13239
13240static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) {
13241 SmallVector<SplitQualType, 8> R;
13242 while (true) {
    QTotal.addConsistentQualifiers(T.Quals);
13244 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
13245 if (NT == QualType(T.Ty, 0))
13246 break;
    R.push_back(T);
13248 T = NT.split();
13249 }
13250 return R;
13251}
13252
QualType ASTContext::getCommonSugaredType(QualType X, QualType Y,
                                          bool Unqualified) {
  assert(Unqualified ? hasSameUnqualifiedType(X, Y) : hasSameType(X, Y));
  if (X == Y)
    return X;
  if (!Unqualified) {
    if (X.isCanonical())
      return X;
    if (Y.isCanonical())
      return Y;
  }

  SplitQualType SX = X.split(), SY = Y.split();
  Qualifiers QX, QY;
  // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys,
  // until we reach their underlying "canonical nodes". Note these are not
  // necessarily canonical types, as they may still have sugared properties.
  // QX and QY will store the sum of all qualifiers in Xs and Ys respectively.
  auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY);
  if (SX.Ty != SY.Ty) {
    // The canonical nodes differ. Build a common canonical node out of the
    // two, unifying their sugar. This may recurse back here.
    SX.Ty =
        ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr();
  } else {
    // The canonical nodes were identical: We may have desugared too much.
    // Add any common sugar back in.
    while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) {
      QX -= SX.Quals;
      QY -= SY.Quals;
      SX = Xs.pop_back_val();
      SY = Ys.pop_back_val();
    }
  }
  if (Unqualified)
    QX = Qualifiers::removeCommonQualifiers(QX, QY);
  else
    assert(QX == QY);

  // Even though the remaining sugar nodes in Xs and Ys differ, some may be
  // related. Walk up these nodes, unifying them and adding the result.
  while (!Xs.empty() && !Ys.empty()) {
    auto Underlying = SplitQualType(
        SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals));
    SX = Xs.pop_back_val();
    SY = Ys.pop_back_val();
    SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying)
                .getTypePtrOrNull();
    // Stop at the first pair which is unrelated.
    if (!SX.Ty) {
      SX.Ty = Underlying.Ty;
      break;
    }
    QX -= Underlying.Quals;
  }

  // Add back the missing accumulated qualifiers, which were stripped off
  // with the sugar nodes we could not unify.
  QualType R = getQualifiedType(SX.Ty, QX);
  assert(Unqualified ? hasSameUnqualifiedType(R, X) : hasSameType(R, X));
  return R;
}

QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
  case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}

LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
    const clang::ASTContext &Ctx, Decl *Value);

unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}

unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}

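// Illustrative example (the bit width and scale below are assumptions; the
// real values are target-defined): on a target where 'short _Accum' is 16 bits
// wide with a scale of 7, getFixedPointSemantics(ShortAccumTy) describes a
// signed, non-saturating 16-bit value with 7 fractional bits.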
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

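// For example, the signed counterpart of '_Sat unsigned short _Accum' is
// '_Sat short _Accum', and that of 'unsigned _Fract' is '_Fract'.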
QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs(
    const TargetVersionAttr *TV) const {
  assert(TV != nullptr);
  llvm::SmallVector<StringRef, 8> Feats;
  std::vector<std::string> ResFeats;
  TV->getFeatures(Feats);
  for (auto &Feature : Feats)
    if (Target->validateCpuSupports(Feature.str()))
      // Use '?' to mark features that came from TargetVersion.
      ResFeats.push_back("?" + Feature.str());
  return ResFeats;
}

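// Illustrative sketch (the attribute string is hypothetical): on an x86
// target, for a function declared with __attribute__((target("avx2,bogus"))),
// the parsed feature list keeps "+avx2" and drops the entry for the unknown
// feature "bogus".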
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr());

  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });
  return ParsedAttr;
}

void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}

// Fills in the supplied string map with the set of target features for the
// passed in function.
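// Illustrative example (the attribute and feature names are assumptions and
// target-dependent): for an x86 function declared with
// __attribute__((target("avx2"))), the resulting map contains the
// command-line features plus "avx2" mapped to true.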
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU))
      TargetCPU = ParsedAttr.CPU;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed in features (FeaturesAsWritten) along with the new ones
    // from the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (Target->getTriple().isAArch64()) {
      // TargetClones for AArch64
      if (VersionStr != "default") {
        SmallVector<StringRef, 1> VersionFeatures;
        VersionStr.split(VersionFeatures, "+");
        for (auto &VFeature : VersionFeatures) {
          VFeature = VFeature.trim();
          // Use '?' to mark features that came from AArch64 TargetClones.
          Features.push_back((StringRef{"?"} + VFeature).str());
        }
      }
      Features.insert(Features.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.begin(),
                      Target->getTargetOpts().FeaturesAsWritten.end());
    } else {
      if (VersionStr.starts_with("arch="))
        TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
      else if (VersionStr != "default")
        Features.push_back((StringRef{"+"} + VersionStr).str());
    }
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) {
    std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV);
    Feats.insert(Feats.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.begin(),
                 Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalize(const Decl *D) const {
  bool IsInternalVar =
      isa<VarDecl>(D) &&
      basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: managed variables need to be externalized since they are
  // declarations in IR and therefore cannot have internal linkage. Kernels in
  // an anonymous namespace need to be externalized to avoid duplicate symbols.
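  // For example (illustrative): a file-scope 'static __device__' variable, or
  // a '__global__' kernel defined in an anonymous namespace.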
  return (IsInternalVar &&
          (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) ||
         (D->hasAttr<CUDAGlobalAttr>() &&
          basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) ==
              GVA_Internal);
}

bool ASTContext::shouldExternalize(const Decl *D) const {
  return mayExternalize(D) &&
         (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}