1//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-module state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenModule.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCall.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenCLRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGOpenMPRuntimeAMDGCN.h"
23#include "CGOpenMPRuntimeNVPTX.h"
24#include "CodeGenFunction.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "CoverageMappingGen.h"
28#include "TargetInfo.h"
29#include "clang/AST/ASTContext.h"
30#include "clang/AST/CharUnits.h"
31#include "clang/AST/DeclCXX.h"
32#include "clang/AST/DeclObjC.h"
33#include "clang/AST/DeclTemplate.h"
34#include "clang/AST/Mangle.h"
35#include "clang/AST/RecordLayout.h"
36#include "clang/AST/RecursiveASTVisitor.h"
37#include "clang/AST/StmtVisitor.h"
38#include "clang/Basic/Builtins.h"
39#include "clang/Basic/CharInfo.h"
40#include "clang/Basic/CodeGenOptions.h"
41#include "clang/Basic/Diagnostic.h"
42#include "clang/Basic/FileManager.h"
43#include "clang/Basic/Module.h"
44#include "clang/Basic/SourceManager.h"
45#include "clang/Basic/TargetInfo.h"
46#include "clang/Basic/Version.h"
47#include "clang/CodeGen/ConstantInitBuilder.h"
48#include "clang/Frontend/FrontendDiagnostic.h"
49#include "llvm/ADT/StringSwitch.h"
50#include "llvm/ADT/Triple.h"
51#include "llvm/Analysis/TargetLibraryInfo.h"
52#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
53#include "llvm/IR/CallingConv.h"
54#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/Intrinsics.h"
56#include "llvm/IR/LLVMContext.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/ProfileSummary.h"
59#include "llvm/ProfileData/InstrProfReader.h"
60#include "llvm/Support/CodeGen.h"
61#include "llvm/Support/CommandLine.h"
62#include "llvm/Support/ConvertUTF.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/MD5.h"
65#include "llvm/Support/TimeProfiler.h"
66
67using namespace clang;
68using namespace CodeGen;
69
// Hidden, experimental backend-style flag: when set, only limited coverage
// mapping information is emitted.
static llvm::cl::opt<bool> LimitedCoverage(
    "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
    llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
    llvm::cl::init(false));

// Section name used for annotation globals (see EmitGlobalAnnotations).
static const char AnnotationSection[] = "llvm.metadata";
76
77static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
78 switch (CGM.getContext().getCXXABIKind()) {
79 case TargetCXXABI::AppleARM64:
80 case TargetCXXABI::Fuchsia:
81 case TargetCXXABI::GenericAArch64:
82 case TargetCXXABI::GenericARM:
83 case TargetCXXABI::iOS:
84 case TargetCXXABI::WatchOS:
85 case TargetCXXABI::GenericMIPS:
86 case TargetCXXABI::GenericItanium:
87 case TargetCXXABI::WebAssembly:
88 case TargetCXXABI::XL:
89 return CreateItaniumCXXABI(CGM);
90 case TargetCXXABI::Microsoft:
91 return CreateMicrosoftCXXABI(CGM);
92 }
93
94 llvm_unreachable("invalid C++ ABI kind");
95}
96
// Set up all per-module IRGen state: the cached LLVM types, the
// language-specific runtime helpers, TBAA, debug info, the PGO profile
// reader, coverage mapping, and (optionally) the unique-linkage name hash.
CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
                             const PreprocessorOptions &PPO,
                             const CodeGenOptions &CGO, llvm::Module &M,
                             DiagnosticsEngine &diags,
                             CoverageSourceInfo *CoverageInfo)
    : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
      PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
      Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
      VMContext(M.getContext()), Types(*this), VTables(*this),
      SanitizerMD(new SanitizerMetadata(*this)) {

  // Initialize the type cache.
  llvm::LLVMContext &LLVMContext = M.getContext();
  VoidTy = llvm::Type::getVoidTy(LLVMContext);
  Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
  Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
  HalfTy = llvm::Type::getHalfTy(LLVMContext);
  BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
  FloatTy = llvm::Type::getFloatTy(LLVMContext);
  DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
  PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
  PointerAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
  // Note: sized from the *maximum* pointer width of the target.
  SizeSizeInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
  IntAlignInBytes =
      C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
  CharTy =
      llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
  IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
  IntPtrTy = llvm::IntegerType::get(LLVMContext,
                                    C.getTargetInfo().getMaxPointerWidth());
  Int8PtrTy = Int8Ty->getPointerTo(0);
  Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
  // Allocas may live in a non-default address space on some targets; use the
  // data layout's alloca address space for this pointer type.
  AllocaInt8PtrTy = Int8Ty->getPointerTo(
      M.getDataLayout().getAllocaAddrSpace());
  ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();

  RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();

  // Create the language-specific runtime helpers that later IRGen relies on.
  if (LangOpts.ObjC)
    createObjCRuntime();
  if (LangOpts.OpenCL)
    createOpenCLRuntime();
  if (LangOpts.OpenMP)
    createOpenMPRuntime();
  if (LangOpts.CUDA)
    createCUDARuntime();

  // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
  if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
      (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
    TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
                               getCXXABI().getMangleContext()));

  // If debug info or coverage generation is enabled, create the CGDebugInfo
  // object.
  if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
      CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
    DebugInfo.reset(new CGDebugInfo(*this));

  Block.GlobalUniqueCount = 0;

  if (C.getLangOpts().ObjC)
    ObjCData.reset(new ObjCEntrypoints());

  // Load the instrumentation profile for PGO, reporting read errors as
  // frontend diagnostics rather than crashing.
  if (CodeGenOpts.hasProfileClangUse()) {
    auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
        CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
    if (auto E = ReaderOrErr.takeError()) {
      unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                              "Could not read profile %0: %1");
      llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
        getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
                                  << EI.message();
      });
    } else
      PGOReader = std::move(ReaderOrErr.get());
  }

  // If coverage mapping generation is enabled, create the
  // CoverageMappingModuleGen object.
  if (CodeGenOpts.CoverageMapping)
    CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));

  // Generate the module name hash here if needed.
  if (CodeGenOpts.UniqueInternalLinkageNames &&
      !getModule().getSourceFileName().empty()) {
    std::string Path = getModule().getSourceFileName();
    // Check if a path substitution is needed from the MacroPrefixMap.
    // rfind(Prefix, 0) is the "Path starts with Prefix" idiom.
    for (const auto &Entry : PPO.MacroPrefixMap)
      if (Path.rfind(Entry.first, 0) != std::string::npos) {
        Path = Entry.second + Path.substr(Entry.first.size());
        break;
      }
    llvm::MD5 Md5;
    Md5.update(Path);
    llvm::MD5::MD5Result R;
    Md5.final(R);
    SmallString<32> Str;
    llvm::MD5::stringifyResult(R, Str);
    // Convert MD5hash to Decimal. Demangler suffixes can either contain
    // numbers or characters but not both.
    llvm::APInt IntHash(128, Str.str(), 16);
    // Prepend "__uniq" before the hash for tools like profilers to understand
    // that this symbol is of internal linkage type. The "__uniq" is the
    // pre-determined prefix that is used to tell tools that this symbol was
    // created with -funique-internal-linkage-names and the tools can strip or
    // keep the prefix as needed.
    ModuleNameHash = (Twine(".__uniq.") +
        Twine(IntHash.toString(/* Radix = */ 10, /* Signed = */false))).str();
  }
}
212
213CodeGenModule::~CodeGenModule() {}
214
215void CodeGenModule::createObjCRuntime() {
216 // This is just isGNUFamily(), but we want to force implementors of
217 // new ABIs to decide how best to do this.
218 switch (LangOpts.ObjCRuntime.getKind()) {
219 case ObjCRuntime::GNUstep:
220 case ObjCRuntime::GCC:
221 case ObjCRuntime::ObjFW:
222 ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
223 return;
224
225 case ObjCRuntime::FragileMacOSX:
226 case ObjCRuntime::MacOSX:
227 case ObjCRuntime::iOS:
228 case ObjCRuntime::WatchOS:
229 ObjCRuntime.reset(CreateMacObjCRuntime(*this));
230 return;
231 }
232 llvm_unreachable("bad runtime kind");
233}
234
235void CodeGenModule::createOpenCLRuntime() {
236 OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
237}
238
239void CodeGenModule::createOpenMPRuntime() {
240 // Select a specialized code generation class based on the target, if any.
241 // If it does not exist use the default implementation.
242 switch (getTriple().getArch()) {
243 case llvm::Triple::nvptx:
244 case llvm::Triple::nvptx64:
245 assert(getLangOpts().OpenMPIsDevice &&
246 "OpenMP NVPTX is only prepared to deal with device code.");
247 OpenMPRuntime.reset(new CGOpenMPRuntimeNVPTX(*this));
248 break;
249 case llvm::Triple::amdgcn:
250 assert(getLangOpts().OpenMPIsDevice &&
251 "OpenMP AMDGCN is only prepared to deal with device code.");
252 OpenMPRuntime.reset(new CGOpenMPRuntimeAMDGCN(*this));
253 break;
254 default:
255 if (LangOpts.OpenMPSimd)
256 OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
257 else
258 OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
259 break;
260 }
261}
262
263void CodeGenModule::createCUDARuntime() {
264 CUDARuntime.reset(CreateNVCUDARuntime(*this));
265}
266
267void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
268 Replacements[Name] = C;
269}
270
// Apply the function replacements recorded via addReplacement(): RAUW each
// previously-emitted function with its replacement, keeping the replacement
// at the old function's position in the module's function list.
void CodeGenModule::applyReplacements() {
  for (auto &I : Replacements) {
    StringRef MangledName = I.first();
    llvm::Constant *Replacement = I.second;
    // Nothing to do if the named global was never emitted.
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;
    auto *OldF = cast<llvm::Function>(Entry);
    auto *NewF = dyn_cast<llvm::Function>(Replacement);
    if (!NewF) {
      // The replacement may be an alias or a cast/GEP wrapping the actual
      // function; dig the function out so it can be repositioned below.
      if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
        NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
      } else {
        auto *CE = cast<llvm::ConstantExpr>(Replacement);
        assert(CE->getOpcode() == llvm::Instruction::BitCast ||
               CE->getOpcode() == llvm::Instruction::GetElementPtr);
        NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
      }
    }

    // Replace old with new, but keep the old order.
    OldF->replaceAllUsesWith(Replacement);
    if (NewF) {
      NewF->removeFromParent();
      OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
                                                       NewF);
    }
    OldF->eraseFromParent();
  }
}
301
302void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
303 GlobalValReplacements.push_back(std::make_pair(GV, C));
304}
305
306void CodeGenModule::applyGlobalValReplacements() {
307 for (auto &I : GlobalValReplacements) {
308 llvm::GlobalValue *GV = I.first;
309 llvm::Constant *C = I.second;
310
311 GV->replaceAllUsesWith(C);
312 GV->eraseFromParent();
313 }
314}
315
// This is only used in aliases that we created and we know they have a
// linear structure.
//
// Walk the chain of aliases/ifuncs (through pointer casts) until a real
// global object is reached. Returns null if the chain is cyclic or ends in
// something that is not a global object.
static const llvm::GlobalObject *getAliasedGlobal(
    const llvm::GlobalIndirectSymbol &GIS) {
  llvm::SmallPtrSet<const llvm::GlobalIndirectSymbol*, 4> Visited;
  const llvm::Constant *C = &GIS;
  for (;;) {
    C = C->stripPointerCasts();
    if (auto *GO = dyn_cast<llvm::GlobalObject>(C))
      return GO;
    // stripPointerCasts will not walk over weak aliases.
    auto *GIS2 = dyn_cast<llvm::GlobalIndirectSymbol>(C);
    if (!GIS2)
      return nullptr;
    // Seen before: the alias chain contains a cycle.
    if (!Visited.insert(GIS2).second)
      return nullptr;
    C = GIS2->getIndirectSymbol();
  }
}
335
// Verify that every alias/ifunc we emitted is well formed (non-cyclic,
// targets a definition, ifunc resolver returns a pointer, section matches),
// emitting diagnostics for violations. If any error was found, all aliases
// are removed from the module at the end so later passes don't see them.
void CodeGenModule::checkAliases() {
  // Check if the constructed aliases are well formed. It is really unfortunate
  // that we have to do this in CodeGen, but we only construct mangled names
  // and aliases during codegen.
  bool Error = false;
  DiagnosticsEngine &Diags = getDiags();
  for (const GlobalDecl &GD : Aliases) {
    const auto *D = cast<ValueDecl>(GD.getDecl());
    SourceLocation Location;
    bool IsIFunc = D->hasAttr<IFuncAttr>();
    // Diagnose at the attribute that created the alias/ifunc.
    if (const Attr *A = D->getDefiningAttr())
      Location = A->getLocation();
    else
      llvm_unreachable("Not an alias or ifunc?");
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
    const llvm::GlobalValue *GV = getAliasedGlobal(*Alias);
    if (!GV) {
      // Null means the alias chain never reached a global object (cycle).
      Error = true;
      Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
    } else if (GV->isDeclaration()) {
      // An alias must point at a definition in this module.
      Error = true;
      Diags.Report(Location, diag::err_alias_to_undefined)
          << IsIFunc << IsIFunc;
    } else if (IsIFunc) {
      // Check resolver function type.
      llvm::FunctionType *FTy = dyn_cast<llvm::FunctionType>(
          GV->getType()->getPointerElementType());
      assert(FTy);
      if (!FTy->getReturnType()->isPointerTy())
        Diags.Report(Location, diag::err_ifunc_resolver_return);
    }

    // Find the immediate aliasee global, looking through one constant expr.
    llvm::Constant *Aliasee = Alias->getIndirectSymbol();
    llvm::GlobalValue *AliaseeGV;
    if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
      AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
    else
      AliaseeGV = cast<llvm::GlobalValue>(Aliasee);

    if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
      StringRef AliasSection = SA->getName();
      // Warn if the alias asks for a section its target is not in.
      if (AliasSection != AliaseeGV->getSection())
        Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
            << AliasSection << IsIFunc << IsIFunc;
    }

    // We have to handle alias to weak aliases in here. LLVM itself disallows
    // this since the object semantics would not match the IL one. For
    // compatibility with gcc we implement it by just pointing the alias
    // to its aliasee's aliasee. We also warn, since the user is probably
    // expecting the link to be weak.
    if (auto GA = dyn_cast<llvm::GlobalIndirectSymbol>(AliaseeGV)) {
      if (GA->isInterposable()) {
        Diags.Report(Location, diag::warn_alias_to_weak_alias)
            << GV->getName() << GA->getName() << IsIFunc;
        Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            GA->getIndirectSymbol(), Alias->getType());
        Alias->setIndirectSymbol(Aliasee);
      }
    }
  }
  if (!Error)
    return;

  // Something was wrong: strip all aliases out of the module, replacing any
  // remaining uses with undef so the IR stays valid.
  for (const GlobalDecl &GD : Aliases) {
    StringRef MangledName = getMangledName(GD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    auto *Alias = cast<llvm::GlobalIndirectSymbol>(Entry);
    Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
    Alias->eraseFromParent();
  }
}
410
// Discard queued-but-unemitted declarations and reset the OpenMP runtime's
// per-module state (if one was created).
void CodeGenModule::clear() {
  DeferredDeclsToEmit.clear();
  if (OpenMPRuntime)
    OpenMPRuntime->clear();
}
416
417void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
418 StringRef MainFile) {
419 if (!hasDiagnostics())
420 return;
421 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
422 if (MainFile.empty())
423 MainFile = "<stdin>";
424 Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
425 } else {
426 if (Mismatched > 0)
427 Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
428
429 if (Missing > 0)
430 Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
431 }
432}
433
// Translate each global's DLL storage class into an LLVM visibility chosen by
// the corresponding LangOptions setting, then clear the storage class. No-op
// unless LO.VisibilityFromDLLStorageClass is set.
static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
                                             llvm::Module &M) {
  if (!LO.VisibilityFromDLLStorageClass)
    return;

  // The four visibilities to apply, keyed on (declaration?, storage class).
  llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
      CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
  llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
      CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
  llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
      CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
  llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
      CodeGenModule::GetLLVMVisibility(
          LO.getExternDeclNoDLLStorageClassVisibility());

  for (llvm::GlobalValue &GV : M.global_values()) {
    // Appending/local-linkage globals cannot meaningfully carry visibility.
    if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
      continue;

    // Reset DSO locality before setting the visibility. This removes
    // any effects that visibility options and annotations may have
    // had on the DSO locality. Setting the visibility will implicitly set
    // appropriate globals to DSO Local; however, this will be pessimistic
    // w.r.t. to the normal compiler IRGen.
    GV.setDSOLocal(false);

    if (GV.isDeclarationForLinker()) {
      GV.setVisibility(GV.getDLLStorageClass() ==
                               llvm::GlobalValue::DLLImportStorageClass
                           ? ExternDeclDLLImportVisibility
                           : ExternDeclNoDLLStorageClassVisibility);
    } else {
      GV.setVisibility(GV.getDLLStorageClass() ==
                               llvm::GlobalValue::DLLExportStorageClass
                           ? DLLExportVisibility
                           : NoDLLStorageClassVisibility);
    }

    // The storage class has been fully translated; drop it.
    GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
  }
}
475
// Finalize the module: flush everything that was deferred during IRGen, run
// the alias/replacement fixups that depend on the final set of globals, and
// then stamp the module with all module-level flags and metadata. The call
// order here is significant (e.g. replacements and alias checks must follow
// EmitDeferred).
void CodeGenModule::Release() {
  EmitDeferred();
  EmitVTablesOpportunistically();
  applyGlobalValReplacements();
  applyReplacements();
  checkAliases();
  emitMultiVersionFunctions();
  EmitCXXGlobalInitFunc();
  EmitCXXGlobalCleanUpFunc();
  registerGlobalDtorsWithAtExit();
  EmitCXXThreadLocalInitFunc();
  // Language runtimes may contribute a module constructor.
  if (ObjCRuntime)
    if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
      AddGlobalCtor(ObjCInitFunction);
  if (Context.getLangOpts().CUDA && CUDARuntime) {
    if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
      AddGlobalCtor(CudaCtorFunction);
  }
  if (OpenMPRuntime) {
    if (llvm::Function *OpenMPRequiresDirectiveRegFun =
            OpenMPRuntime->emitRequiresDirectiveRegFun()) {
      AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
    }
    OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
    OpenMPRuntime->clear();
  }
  if (PGOReader) {
    // Attach the profile summary and report stale/missing-profile warnings.
    getModule().setProfileSummary(
        PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
        llvm::ProfileSummary::PSK_Instr);
    if (PGOStats.hasDiagnostics())
      PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
  }
  EmitCtorList(GlobalCtors, "llvm.global_ctors");
  EmitCtorList(GlobalDtors, "llvm.global_dtors");
  EmitGlobalAnnotations();
  EmitStaticExternCAliases();
  EmitDeferredUnusedCoverageMappings();
  if (CoverageMapping)
    CoverageMapping->emit();
  if (CodeGenOpts.SanitizeCfiCrossDso) {
    CodeGenFunction(*this).EmitCfiCheckFail();
    CodeGenFunction(*this).EmitCfiCheckStub();
  }
  emitAtAvailableLinkGuard();
  if (Context.getTargetInfo().getTriple().isWasm() &&
      !Context.getTargetInfo().getTriple().isOSEmscripten()) {
    EmitMainVoidAlias();
  }
  emitLLVMUsed();
  if (SanStats)
    SanStats->finish();

  if (CodeGenOpts.Autolink &&
      (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
    EmitModuleLinkOptions();
  }

  // On ELF we pass the dependent library specifiers directly to the linker
  // without manipulating them. This is in contrast to other platforms where
  // they are mapped to a specific linker option by the compiler. This
  // difference is a result of the greater variety of ELF linkers and the fact
  // that ELF linkers tend to handle libraries in a more complicated fashion
  // than on other platforms. This forces us to defer handling the dependent
  // libs to the linker.
  //
  // CUDA/HIP device and host libraries are different. Currently there is no
  // way to differentiate dependent libraries for host or device. Existing
  // usage of #pragma comment(lib, *) is intended for host libraries on
  // Windows. Therefore emit llvm.dependent-libraries only for host.
  if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
    auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
    for (auto *MD : ELFDependentLibraries)
      NMD->addOperand(MD);
  }

  // Record mregparm value now so it is visible through rest of codegen.
  if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
    getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
                              CodeGenOpts.NumRegisterParameters);

  if (CodeGenOpts.DwarfVersion) {
    getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
                              CodeGenOpts.DwarfVersion);
  }

  if (CodeGenOpts.Dwarf64)
    getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);

  if (Context.getLangOpts().SemanticInterposition)
    // Require various optimization to respect semantic interposition.
    getModule().setSemanticInterposition(1);

  if (CodeGenOpts.EmitCodeView) {
    // Indicate that we want CodeView in the metadata.
    getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
  }
  if (CodeGenOpts.CodeViewGHash) {
    getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
  }
  if (CodeGenOpts.ControlFlowGuard) {
    // Function ID tables and checks for Control Flow Guard (cfguard=2).
    getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
  } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
    // Function ID tables for Control Flow Guard (cfguard=1).
    getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
  }
  if (CodeGenOpts.EHContGuard) {
    // Function ID tables for EH Continuation Guard.
    getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
  }
  if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
    // We don't support LTO with 2 with different StrictVTablePointers
    // FIXME: we could support it by stripping all the information introduced
    // by StrictVTablePointers.

    getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);

    llvm::Metadata *Ops[2] = {
        llvm::MDString::get(VMContext, "StrictVTablePointers"),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::Type::getInt32Ty(VMContext), 1))};

    getModule().addModuleFlag(llvm::Module::Require,
                              "StrictVTablePointersRequirement",
                              llvm::MDNode::get(VMContext, Ops));
  }
  if (getModuleDebugInfo())
    // We support a single version in the linked module. The LLVM
    // parser will drop debug info with a different version number
    // (and warn about it, too).
    getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
                              llvm::DEBUG_METADATA_VERSION);

  // We need to record the widths of enums and wchar_t, so that we can generate
  // the correct build attributes in the ARM backend. wchar_size is also used by
  // TargetLibraryInfo.
  uint64_t WCharWidth =
      Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
  getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);

  llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
  if ( Arch == llvm::Triple::arm
      || Arch == llvm::Triple::armeb
      || Arch == llvm::Triple::thumb
      || Arch == llvm::Triple::thumbeb) {
    // The minimum width of an enum in bytes
    uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
    getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
  }

  if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
    StringRef ABIStr = Target.getABI();
    llvm::LLVMContext &Ctx = TheModule.getContext();
    getModule().addModuleFlag(llvm::Module::Error, "target-abi",
                              llvm::MDString::get(Ctx, ABIStr));
  }

  if (CodeGenOpts.SanitizeCfiCrossDso) {
    // Indicate that we want cross-DSO control flow integrity checks.
    getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
  }

  if (CodeGenOpts.WholeProgramVTables) {
    // Indicate whether VFE was enabled for this module, so that the
    // vcall_visibility metadata added under whole program vtables is handled
    // appropriately in the optimizer.
    getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
                              CodeGenOpts.VirtualFunctionElimination);
  }

  if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
    getModule().addModuleFlag(llvm::Module::Override,
                              "CFI Canonical Jump Tables",
                              CodeGenOpts.SanitizeCfiCanonicalJumpTables);
  }

  if (CodeGenOpts.CFProtectionReturn &&
      Target.checkCFProtectionReturnSupported(getDiags())) {
    // Indicate that we want to instrument return control flow protection.
    getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
                              1);
  }

  if (CodeGenOpts.CFProtectionBranch &&
      Target.checkCFProtectionBranchSupported(getDiags())) {
    // Indicate that we want to instrument branch control flow protection.
    getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
                              1);
  }

  // AArch64: record BTI / pointer-authentication return-address signing
  // settings as module flags for the backend.
  if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
      Arch == llvm::Triple::aarch64_be) {
    getModule().addModuleFlag(llvm::Module::Error,
                              "branch-target-enforcement",
                              LangOpts.BranchTargetEnforcement);

    getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
                              LangOpts.hasSignReturnAddress());

    getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
                              LangOpts.isSignReturnAddressScopeAll());

    getModule().addModuleFlag(llvm::Module::Error,
                              "sign-return-address-with-bkey",
                              !LangOpts.isSignReturnAddressWithAKey());
  }

  if (!CodeGenOpts.MemoryProfileOutput.empty()) {
    llvm::LLVMContext &Ctx = TheModule.getContext();
    getModule().addModuleFlag(
        llvm::Module::Error, "MemProfProfileFilename",
        llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
  }

  if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
    // Indicate whether __nvvm_reflect should be configured to flush denormal
    // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
    // property.)
    getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
                              CodeGenOpts.FP32DenormalMode.Output !=
                                  llvm::DenormalMode::IEEE);
  }

  // Emit OpenCL specific module metadata: OpenCL/SPIR version.
  if (LangOpts.OpenCL) {
    EmitOpenCLMetadata();
    // Emit SPIR version.
    if (getTriple().isSPIR()) {
      // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
      // opencl.spir.version named metadata.
      // C++ is backwards compatible with OpenCL v2.0.
      auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
      llvm::Metadata *SPIRVerElts[] = {
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
              Int32Ty, Version / 100)),
          llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
              Int32Ty, (Version / 100 > 1) ? 0 : 2))};
      llvm::NamedMDNode *SPIRVerMD =
          TheModule.getOrInsertNamedMetadata("opencl.spir.version");
      llvm::LLVMContext &Ctx = TheModule.getContext();
      SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
    }
  }

  if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
    assert(PLevel < 3 && "Invalid PIC Level");
    getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
    if (Context.getLangOpts().PIE)
      getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
  }

  if (getCodeGenOpts().CodeModel.size() > 0) {
    unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
                      .Case("tiny", llvm::CodeModel::Tiny)
                      .Case("small", llvm::CodeModel::Small)
                      .Case("kernel", llvm::CodeModel::Kernel)
                      .Case("medium", llvm::CodeModel::Medium)
                      .Case("large", llvm::CodeModel::Large)
                      .Default(~0u);
    if (CM != ~0u) {
      llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
      getModule().setCodeModel(codeModel);
    }
  }

  if (CodeGenOpts.NoPLT)
    getModule().setRtLibUseGOT();
  if (CodeGenOpts.UnwindTables)
    getModule().setUwtable();

  switch (CodeGenOpts.getFramePointer()) {
  case CodeGenOptions::FramePointerKind::None:
    // 0 ("none") is the default.
    break;
  case CodeGenOptions::FramePointerKind::NonLeaf:
    getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
    break;
  case CodeGenOptions::FramePointerKind::All:
    getModule().setFramePointer(llvm::FramePointerKind::All);
    break;
  }

  SimplifyPersonality();

  if (getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
    EmitCoverageFile();

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->finalize();

  if (getCodeGenOpts().EmitVersionIdentMetadata)
    EmitVersionIdentMetadata();

  if (!getCodeGenOpts().RecordCommandLine.empty())
    EmitCommandLineMetadata();

  getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);

  EmitBackendOptionsMetadata(getCodeGenOpts());

  // Set visibility from DLL storage class
  // We do this at the end of LLVM IR generation; after any operation
  // that might affect the DLL storage class or the visibility, and
  // before anything that might act on these.
  setVisibilityFromDLLStorageClass(LangOpts, getModule());
}
786
787void CodeGenModule::EmitOpenCLMetadata() {
788 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
789 // opencl.ocl.version named metadata node.
790 // C++ is backwards compatible with OpenCL v2.0.
791 // FIXME: We might need to add CXX version at some point too?
792 auto Version = LangOpts.OpenCLCPlusPlus ? 200 : LangOpts.OpenCLVersion;
793 llvm::Metadata *OCLVerElts[] = {
794 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
795 Int32Ty, Version / 100)),
796 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
797 Int32Ty, (Version % 100) / 10))};
798 llvm::NamedMDNode *OCLVerMD =
799 TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
800 llvm::LLVMContext &Ctx = TheModule.getContext();
801 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
802}
803
804void CodeGenModule::EmitBackendOptionsMetadata(
805 const CodeGenOptions CodeGenOpts) {
806 switch (getTriple().getArch()) {
807 default:
808 break;
809 case llvm::Triple::riscv32:
810 case llvm::Triple::riscv64:
811 getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
812 CodeGenOpts.SmallDataLimit);
813 break;
814 }
815}
816
// Notify the type converter that TD is now complete so any cached IR type
// for it can be refreshed.
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
  // Make sure that this type is translated.
  Types.UpdateCompletedType(TD);
}
821
// Refresh the type converter's cached IR type for RD.
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  // Make sure that this type is translated.
  Types.RefreshTypeCacheForClass(RD);
}
826
827llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
828 if (!TBAA)
829 return nullptr;
830 return TBAA->getTypeInfo(QTy);
831}
832
// Compute the TBAA access descriptor for an lvalue access of type AccessType.
// Returns an empty info when TBAA is disabled or when the type will be
// replaced by the target (CUDA builtin surface/texture types).
TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
  if (!TBAA)
    return TBAAAccessInfo();
  if (getLangOpts().CUDAIsDevice) {
    // As CUDA builtin surface/texture types are replaced, skip generating TBAA
    // access info.
    if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
      if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
          nullptr)
        return TBAAAccessInfo();
    } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
      if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
          nullptr)
        return TBAAAccessInfo();
    }
  }
  return TBAA->getAccessInfo(AccessType);
}
851
852TBAAAccessInfo
853CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
854 if (!TBAA)
855 return TBAAAccessInfo();
856 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
857}
858
859llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
860 if (!TBAA)
861 return nullptr;
862 return TBAA->getTBAAStructInfo(QTy);
863}
864
865llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
866 if (!TBAA)
867 return nullptr;
868 return TBAA->getBaseTypeInfo(QTy);
869}
870
871llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
872 if (!TBAA)
873 return nullptr;
874 return TBAA->getAccessTagInfo(Info);
875}
876
877TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
878 TBAAAccessInfo TargetInfo) {
879 if (!TBAA)
880 return TBAAAccessInfo();
881 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
882}
883
884TBAAAccessInfo
885CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
886 TBAAAccessInfo InfoB) {
887 if (!TBAA)
888 return TBAAAccessInfo();
889 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
890}
891
892TBAAAccessInfo
893CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
894 TBAAAccessInfo SrcInfo) {
895 if (!TBAA)
896 return TBAAAccessInfo();
897 return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
898}
899
900void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
901 TBAAAccessInfo TBAAInfo) {
902 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
903 Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
904}
905
906void CodeGenModule::DecorateInstructionWithInvariantGroup(
907 llvm::Instruction *I, const CXXRecordDecl *RD) {
908 I->setMetadata(llvm::LLVMContext::MD_invariant_group,
909 llvm::MDNode::get(getLLVMContext(), {}));
910}
911
912void CodeGenModule::Error(SourceLocation loc, StringRef message) {
913 unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
914 getDiags().Report(Context.getFullLoc(loc), diagID) << message;
915}
916
917/// ErrorUnsupported - Print out an error that codegen doesn't support the
918/// specified stmt yet.
919void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
920 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
921 "cannot compile this %0 yet");
922 std::string Msg = Type;
923 getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
924 << Msg << S->getSourceRange();
925}
926
927/// ErrorUnsupported - Print out an error that codegen doesn't support the
928/// specified decl yet.
929void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
930 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
931 "cannot compile this %0 yet");
932 std::string Msg = Type;
933 getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
934}
935
/// Build a size_t-typed LLVM constant holding the given size in bytes.
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
  return llvm::ConstantInt::get(SizeTy, size.getQuantity());
}
939
940void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
941 const NamedDecl *D) const {
942 if (GV->hasDLLImportStorageClass())
943 return;
944 // Internal definitions always have default visibility.
945 if (GV->hasLocalLinkage()) {
946 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
947 return;
948 }
949 if (!D)
950 return;
951 // Set visibility for definitions, and for declarations if requested globally
952 // or set explicitly.
953 LinkageInfo LV = D->getLinkageAndVisibility();
954 if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
955 !GV->isDeclarationForLinker())
956 GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
957}
958
/// Decide whether \p GV can be marked dso_local, i.e. whether codegen may
/// assume the symbol resolves within the current linkage unit (allowing
/// direct access instead of GOT/PLT/TOC indirection). The checks below are
/// strictly ordered; do not reorder them.
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
                                 llvm::GlobalValue *GV) {
  // Local-linkage symbols can never be preempted.
  if (GV->hasLocalLinkage())
    return true;

  // Hidden/protected visibility prevents preemption — except extern_weak,
  // which may still resolve to a definition (or null) outside this DSO.
  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
    return true;

  // DLLImport explicitly marks the GV as external.
  if (GV->hasDLLImportStorageClass())
    return false;

  const llvm::Triple &TT = CGM.getTriple();
  if (TT.isWindowsGNUEnvironment()) {
    // In MinGW, variables without DLLImport can still be automatically
    // imported from a DLL by the linker; don't mark variables that
    // potentially could come from another DLL as DSO local.
    if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
        !GV->isThreadLocal())
      return false;
  }

  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
  // remain unresolved in the link, they can be resolved to zero, which is
  // outside the current DSO.
  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
    return false;

  // Every other GV is local on COFF.
  // Make an exception for windows OS in the triple: Some firmware builds use
  // *-win32-macho triples. This (accidentally?) produced windows relocations
  // without GOT tables in older clang versions; Keep this behaviour.
  // FIXME: even thread local variables?
  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
    return true;

  // Only handle COFF and ELF for now.
  if (!TT.isOSBinFormatELF())
    return false;

  // If this is not an executable, don't assume anything is local.
  const auto &CGOpts = CGM.getCodeGenOpts();
  llvm::Reloc::Model RM = CGOpts.RelocationModel;
  const auto &LOpts = CGM.getLangOpts();
  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
    // On ELF, if -fno-semantic-interposition is specified and the target
    // supports local aliases, there will be neither CC1
    // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
    // dso_local if using a local alias is preferable (can avoid GOT
    // indirection).
    if (!GV->canBenefitFromLocalAlias())
      return false;
    return !(CGM.getLangOpts().SemanticInterposition ||
             CGM.getLangOpts().HalfNoSemanticInterposition);
  }

  // A definition cannot be preempted from an executable.
  if (!GV->isDeclarationForLinker())
    return true;

  // Most PIC code sequences that assume that a symbol is local cannot produce a
  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
  // depended, it seems worth it to handle it here.
  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
    return false;

  // PowerPC64 prefers TOC indirection to avoid copy relocations.
  if (TT.isPPC64())
    return false;

  if (CGOpts.DirectAccessExternalData) {
    // If -fdirect-access-external-data (default for -fno-pic), set dso_local
    // for non-thread-local variables. If the symbol is not defined in the
    // executable, a copy relocation will be needed at link time. dso_local is
    // excluded for thread-local variables because they generally don't support
    // copy relocations.
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
      if (!Var->isThreadLocal())
        return true;

    // -fno-pic sets dso_local on a function declaration to allow direct
    // accesses when taking its address (similar to a data symbol). If the
    // function is not defined in the executable, a canonical PLT entry will be
    // needed at link time. -fno-direct-access-external-data can avoid the
    // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
    // it could just cause trouble without providing perceptible benefits.
    if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
      return true;
  }

  // If we can use copy relocations we can assume it is local.

  // Otherwise don't assume it is local.
  return false;
}
1054
/// Set the dso_local flag on \p GV according to shouldAssumeDSOLocal().
void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
  GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
}
1058
1059void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1060 GlobalDecl GD) const {
1061 const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
1062 // C++ destructors have a few C++ ABI specific special cases.
1063 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
1064 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
1065 return;
1066 }
1067 setDLLImportDLLExport(GV, D);
1068}
1069
1070void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1071 const NamedDecl *D) const {
1072 if (D && D->isExternallyVisible()) {
1073 if (D->hasAttr<DLLImportAttr>())
1074 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1075 else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
1076 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1077 }
1078}
1079
/// Apply DLL storage, visibility, dso_local and partition to \p GV based on
/// the GlobalDecl (handles destructor variants via the GlobalDecl overload).
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    GlobalDecl GD) const {
  setDLLImportDLLExport(GV, GD);
  setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
}
1085
/// Apply DLL storage, visibility, dso_local and partition to \p GV based on
/// a plain NamedDecl (no destructor-variant handling).
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    const NamedDecl *D) const {
  setDLLImportDLLExport(GV, D);
  setGVPropertiesAux(GV, D);
}
1091
/// Common tail of setGVProperties: visibility, dso_local, and the symbol
/// partition from -fsymbol-partition.
void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
                                       const NamedDecl *D) const {
  setGlobalVisibility(GV, D);
  setDSOLocal(GV);
  GV->setPartition(CodeGenOpts.SymbolPartition);
}
1098
/// Map a tls_model attribute string to the corresponding LLVM TLS mode.
/// NOTE: there is deliberately no .Default() case — the attribute string is
/// presumably validated by Sema to be one of these four spellings before we
/// get here (an unmatched StringSwitch has no defined result).
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
      .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
      .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
      .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
      .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
1106
/// Translate the -ftls-model command-line default into the LLVM TLS mode
/// used for thread-local globals that carry no explicit tls_model attribute.
llvm::GlobalVariable::ThreadLocalMode
CodeGenModule::GetDefaultLLVMTLSModel() const {
  switch (CodeGenOpts.getDefaultTLSModel()) {
  case CodeGenOptions::GeneralDynamicTLSModel:
    return llvm::GlobalVariable::GeneralDynamicTLSModel;
  case CodeGenOptions::LocalDynamicTLSModel:
    return llvm::GlobalVariable::LocalDynamicTLSModel;
  case CodeGenOptions::InitialExecTLSModel:
    return llvm::GlobalVariable::InitialExecTLSModel;
  case CodeGenOptions::LocalExecTLSModel:
    return llvm::GlobalVariable::LocalExecTLSModel;
  }
  llvm_unreachable("Invalid TLS model!");
}
1121
1122void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
1123 assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");
1124
1125 llvm::GlobalValue::ThreadLocalMode TLM;
1126 TLM = GetDefaultLLVMTLSModel();
1127
1128 // Override the TLS model if it is explicitly specified.
1129 if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
1130 TLM = GetLLVMTLSModel(Attr->getModel());
1131 }
1132
1133 GV->setThreadLocalMode(TLM);
1134}
1135
1136static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1137 StringRef Name) {
1138 const TargetInfo &Target = CGM.getTarget();
1139 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1140}
1141
1142static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1143 const CPUSpecificAttr *Attr,
1144 unsigned CPUIndex,
1145 raw_ostream &Out) {
1146 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1147 // supported.
1148 if (Attr)
1149 Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1150 else if (CGM.getTarget().supportsIFunc())
1151 Out << ".resolver";
1152}
1153
/// Append the multiversioning suffix for a target("...") attribute to the
/// mangled name: ".arch_<arch>[_<feat>...]" with features in priority order.
/// The default version gets no suffix. The exact output is ABI-visible, so
/// the ordering logic below must not change.
static void AppendTargetMangling(const CodeGenModule &CGM,
                                 const TargetAttr *Attr, raw_ostream &Out) {
  if (Attr->isDefaultVersion())
    return;

  Out << '.';
  const TargetInfo &Target = CGM.getTarget();
  // Sort features by descending multiversion priority so the suffix is
  // deterministic regardless of how the attribute spelled them.
  ParsedTargetAttr Info =
      Attr->parse([&Target](StringRef LHS, StringRef RHS) {
        // Multiversioning doesn't allow "no-${feature}", so we can
        // only have "+" prefixes here.
        assert(LHS.startswith("+") && RHS.startswith("+") &&
               "Features should always have a prefix.");
        return Target.multiVersionSortPriority(LHS.substr(1)) >
               Target.multiVersionSortPriority(RHS.substr(1));
      });

  bool IsFirst = true;

  if (!Info.Architecture.empty()) {
    IsFirst = false;
    Out << "arch_" << Info.Architecture;
  }

  for (StringRef Feat : Info.Features) {
    if (!IsFirst)
      Out << '_';
    IsFirst = false;
    // Drop the '+' prefix when emitting the feature name.
    Out << Feat.substr(1);
  }
}
1185
1186// Returns true if GD is a function decl with internal linkage and
1187// needs a unique suffix after the mangled name.
1188static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
1189 CodeGenModule &CGM) {
1190 const Decl *D = GD.getDecl();
1191 return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
1192 (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
1193}
1194
/// Compute the mangled name for \p GD/\p ND, including (in this order) the
/// base mangling, the unique-internal-linkage module-hash suffix, the
/// multiversioning suffix, and the HIP static-variable postfix. With
/// \p OmitMultiVersionMangling the multiversion suffix is skipped so callers
/// can recover the pre-multiversion name.
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
                                      const NamedDecl *ND,
                                      bool OmitMultiVersionMangling = false) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  MangleContext &MC = CGM.getCXXABI().getMangleContext();
  // Tell the mangler up front that unique internal-linkage names are in play.
  if (!CGM.getModuleNameHash().empty())
    MC.needsUniqueInternalLinkageNames();
  bool ShouldMangle = MC.shouldMangleDeclName(ND);
  if (ShouldMangle)
    MC.mangleName(GD.getWithDecl(ND), Out);
  else {
    // Unmangled names still get a handful of special prefixes.
    IdentifierInfo *II = ND->getIdentifier();
    assert(II && "Attempt to mangle unnamed decl.");
    const auto *FD = dyn_cast<FunctionDecl>(ND);

    if (FD &&
        FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      Out << "__regcall3__" << II->getName();
    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__device_stub__" << II->getName();
    } else {
      Out << II->getName();
    }
  }

  // Check if the module name hash should be appended for internal linkage
  // symbols. This should come before multi-version target suffixes are
  // appended. This is to keep the name and module hash suffix of the
  // internal linkage function together. The unique suffix should only be
  // added when name mangling is done to make sure that the final name can
  // be properly demangled. For example, for C functions without prototypes,
  // name mangling is not done and the unique suffix should not be appended
  // then.
  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
    assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
           "Hash computed when not explicitly requested");
    Out << CGM.getModuleNameHash();
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
      switch (FD->getMultiVersionKind()) {
      case MultiVersionKind::CPUDispatch:
      case MultiVersionKind::CPUSpecific:
        AppendCPUSpecificCPUDispatchMangling(CGM,
                                             FD->getAttr<CPUSpecificAttr>(),
                                             GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::Target:
        AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
        break;
      case MultiVersionKind::None:
        llvm_unreachable("None multiversion type isn't valid here");
      }
    }

  // Make unique name for device side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode &&
      CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
    CGM.printPostfixForExternalizedStaticVar(Out);
  return std::string(Out.str());
}
1260
/// When a function becomes multiversioned after it was first emitted, rename
/// the already-emitted symbol (mangled without a multiversion suffix) to its
/// proper multiversioned name and update the mangling tables accordingly.
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
                                            const FunctionDecl *FD) {
  if (!FD->isMultiVersion())
    return;

  // Get the name of what this would be without the 'target' attribute. This
  // allows us to lookup the version that was emitted when this wasn't a
  // multiversion function.
  std::string NonTargetName =
      getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
  GlobalDecl OtherGD;
  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
    assert(OtherGD.getCanonicalDecl()
               .getDecl()
               ->getAsFunction()
               ->isMultiVersion() &&
           "Other GD should now be a multiversioned function");
    // OtherFD is the version of this function that was mangled BEFORE
    // becoming a MultiVersion function. It potentially needs to be updated.
    const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
                                      .getDecl()
                                      ->getAsFunction()
                                      ->getMostRecentDecl();
    std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
    // This is so that if the initial version was already the 'default'
    // version, we don't try to update it.
    if (OtherName != NonTargetName) {
      // Remove instead of erase, since others may have stored the StringRef
      // to this.
      const auto ExistingRecord = Manglings.find(NonTargetName);
      if (ExistingRecord != std::end(Manglings))
        Manglings.remove(&(*ExistingRecord));
      // Re-register under the new multiversioned name and point the cached
      // decl-name mapping at the new map entry's key storage.
      auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
      MangledDeclNames[OtherGD.getCanonicalDecl()] = Result.first->first();
      // Rename the already-emitted IR symbol, if any.
      if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
        Entry->setName(OtherName);
    }
  }
}
1300
/// Return the (cached) mangled name for \p GD. The returned StringRef points
/// into the Manglings map, so it stays valid for the lifetime of the module.
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
  GlobalDecl CanonicalGD = GD.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and
  // complete constructors get mangled the same.
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      CXXCtorType OrigCtorType = GD.getCtorType();
      assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
      if (OrigCtorType == Ctor_Base)
        CanonicalGD = GlobalDecl(CD, Ctor_Complete);
    }
  }

  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
  // static device variable depends on whether the variable is referenced by
  // a host or device host function. Therefore the mangled name cannot be
  // cached.
  if (!LangOpts.CUDAIsDevice ||
      !getContext().mayExternalizeStaticVar(GD.getDecl())) {
    auto FoundName = MangledDeclNames.find(CanonicalGD);
    if (FoundName != MangledDeclNames.end())
      return FoundName->second;
  }

  // Keep the first result in the case of a mangling collision.
  const auto *ND = cast<NamedDecl>(GD.getDecl());
  std::string MangledName = getMangledNameImpl(*this, GD, ND);

  // Ensure either we have different ABIs between host and device compilations,
  // says host compilation following MSVC ABI but device compilation follows
  // Itanium C++ ABI or, if they follow the same ABI, kernel names after
  // mangling should be the same after name stubbing. The later checking is
  // very important as the device kernel name being mangled in host-compilation
  // is used to resolve the device binaries to be executed. Inconsistent naming
  // result in undefined behavior. Even though we cannot check that naming
  // directly between host- and device-compilations, the host- and
  // device-mangling in host compilation could help catching certain ones.
  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
         getLangOpts().CUDAIsDevice ||
         (getContext().getAuxTargetInfo() &&
          (getContext().getAuxTargetInfo()->getCXXABI() !=
           getContext().getTargetInfo().getCXXABI())) ||
         getCUDARuntime().getDeviceSideName(ND) ==
             getMangledNameImpl(
                 *this,
                 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
                 ND));

  // Intern the name in Manglings and cache the StringRef for the decl.
  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
  return MangledDeclNames[CanonicalGD] = Result.first->first();
}
1353
1354StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
1355 const BlockDecl *BD) {
1356 MangleContext &MangleCtx = getCXXABI().getMangleContext();
1357 const Decl *D = GD.getDecl();
1358
1359 SmallString<256> Buffer;
1360 llvm::raw_svector_ostream Out(Buffer);
1361 if (!D)
1362 MangleCtx.mangleGlobalBlock(BD,
1363 dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1364 else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1365 MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1366 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1367 MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1368 else
1369 MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1370
1371 auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1372 return Result.first->first();
1373}
1374
/// Look up an already-emitted global value in the module by symbol name,
/// or return null if none exists.
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
  return getModule().getNamedValue(Name);
}
1378
1379/// AddGlobalCtor - Add a function to the list that will be called before
1380/// main() runs.
1381void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1382 llvm::Constant *AssociatedData) {
1383 // FIXME: Type coercion of void()* types.
1384 GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
1385}
1386
1387/// AddGlobalDtor - Add a function to the list that will be called
1388/// when the module is unloaded.
1389void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
1390 bool IsDtorAttrFunc) {
1391 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
1392 (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
1393 DtorsUsingAtExit[Priority].push_back(Dtor);
1394 return;
1395 }
1396
1397 // FIXME: Type coercion of void()* types.
1398 GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
1399}
1400
/// Emit the accumulated constructor/destructor list \p Fns as an appending
/// global (e.g. llvm.global_ctors / llvm.global_dtors) named \p GlobalName,
/// then clear the list.
void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
  if (Fns.empty()) return;

  // Ctor function type is void()*.
  llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
  llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
      TheModule.getDataLayout().getProgramAddressSpace());

  // Get the type of a ctor entry, { i32, void ()*, i8* }.
  llvm::StructType *CtorStructTy = llvm::StructType::get(
      Int32Ty, CtorPFTy, VoidPtrTy);

  // Construct the constructor and destructor arrays.
  ConstantInitBuilder builder(*this);
  auto ctors = builder.beginArray(CtorStructTy);
  for (const auto &I : Fns) {
    auto ctor = ctors.beginStruct(CtorStructTy);
    ctor.addInt(Int32Ty, I.Priority);
    // The stored initializer may have a differently-typed signature; bitcast
    // it to the canonical void()* entry type.
    ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
    if (I.AssociatedData)
      ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
    else
      ctor.addNullPointer(VoidPtrTy);
    ctor.finishAndAddTo(ctors);
  }

  auto list =
    ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
                                /*constant*/ false,
                                llvm::GlobalValue::AppendingLinkage);

  // The LTO linker doesn't seem to like it when we set an alignment
  // on appending variables. Take it off as a workaround.
  list->setAlignment(llvm::None);

  Fns.clear();
}
1438
1439llvm::GlobalValue::LinkageTypes
1440CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
1441 const auto *D = cast<FunctionDecl>(GD.getDecl());
1442
1443 GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
1444
1445 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1446 return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1447
1448 if (isa<CXXConstructorDecl>(D) &&
1449 cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1450 Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1451 // Our approach to inheriting constructors is fundamentally different from
1452 // that used by the MS ABI, so keep our inheriting constructor thunks
1453 // internal rather than trying to pick an unambiguous mangling for them.
1454 return llvm::GlobalValue::InternalLinkage;
1455 }
1456
1457 return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1458}
1459
1460llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1461 llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1462 if (!MDS) return nullptr;
1463
1464 return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1465}
1466
1467void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1468 const CGFunctionInfo &Info,
1469 llvm::Function *F) {
1470 unsigned CallingConv;
1471 llvm::AttributeList PAL;
1472 ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
1473 F->setAttributes(PAL);
1474 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1475}
1476
1477static void removeImageAccessQualifier(std::string& TyName) {
1478 std::string ReadOnlyQual("__read_only");
1479 std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
1480 if (ReadOnlyPos != std::string::npos)
1481 // "+ 1" for the space after access qualifier.
1482 TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
1483 else {
1484 std::string WriteOnlyQual("__write_only");
1485 std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
1486 if (WriteOnlyPos != std::string::npos)
1487 TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
1488 else {
1489 std::string ReadWriteQual("__read_write");
1490 std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
1491 if (ReadWritePos != std::string::npos)
1492 TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
1493 }
1494 }
1495}
1496
// Returns the address space id that should be produced to the
// kernel_arg_addr_space metadata. This is always fixed to the ids
// as specified in the SPIR 2.0 specification in order to differentiate
// for example in clGetKernelArgInfo() implementation between the address
// spaces with targets without unique mapping to the OpenCL address spaces
// (basically all single AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:
    return 1;
  case LangAS::opencl_constant:
    return 2;
  case LangAS::opencl_local:
    return 3;
  case LangAS::opencl_generic:
    return 4; // Not in SPIR 2.0 specs.
  case LangAS::opencl_global_device:
    return 5;
  case LangAS::opencl_global_host:
    return 6;
  default:
    // Anything else (including target-specific address spaces) is reported
    // as private.
    return 0; // Assume private.
  }
}
1521
/// Emit the per-argument OpenCL kernel metadata (kernel_arg_addr_space,
/// kernel_arg_access_qual, kernel_arg_type, kernel_arg_base_type,
/// kernel_arg_type_qual, and optionally kernel_arg_name) onto \p Fn.
/// The six vectors below are kept in lock-step: exactly one entry is pushed
/// to each per kernel argument, in declaration order.
void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
                                         const FunctionDecl *FD,
                                         CodeGenFunction *CGF) {
  assert(((FD && CGF) || (!FD && !CGF)) &&
         "Incorrect use - FD and CGF should either be both null or not!");
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  const PrintingPolicy &Policy = Context.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  if (FD && CGF)
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
      const ParmVarDecl *parm = FD->getParamDecl(i);
      QualType ty = parm->getType();
      std::string typeQuals;

      // Get image and pipe access qualifier:
      if (ty->isImageType() || ty->isPipeType()) {
        // The access attribute may live on a typedef rather than the
        // parameter itself.
        const Decl *PDecl = parm;
        if (auto *TD = dyn_cast<TypedefType>(ty))
          PDecl = TD->getDecl();
        const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
        // read_only is the OpenCL default when no attribute is present.
        if (A && A->isWriteOnly())
          accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
        else if (A && A->isReadWrite())
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
        else
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
      } else
        accessQuals.push_back(llvm::MDString::get(VMContext, "none"));

      // Get argument name.
      argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));

      // Render a type's spelling, folding "unsigned T" to "uT" and dropping
      // a redundant "signed " prefix for canonical types.
      auto getTypeSpelling = [&](QualType Ty) {
        auto typeName = Ty.getUnqualifiedType().getAsString(Policy);

        if (Ty.isCanonical()) {
          StringRef typeNameRef = typeName;
          // Turn "unsigned type" to "utype"
          if (typeNameRef.consume_front("unsigned "))
            return std::string("u") + typeNameRef.str();
          if (typeNameRef.consume_front("signed "))
            return typeNameRef.str();
        }

        return typeName;
      };

      if (ty->isPointerType()) {
        QualType pointeeTy = ty->getPointeeType();

        // Get address qualifier.
        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
                ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

        // Get argument type name.
        std::string typeName = getTypeSpelling(pointeeTy) + "*";
        std::string baseTypeName =
            getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        // Get argument type qualifiers:
        if (ty.isRestrictQualified())
          typeQuals = "restrict";
        if (pointeeTy.isConstQualified() ||
            (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
          typeQuals += typeQuals.empty() ? "const" : " const";
        if (pointeeTy.isVolatileQualified())
          typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      } else {
        uint32_t AddrSpc = 0;
        bool isPipe = ty->isPipeType();
        // Images and pipes are reported in the global address space.
        if (ty->isImageType() || isPipe)
          AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));

        // Get argument type name.
        ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
        std::string typeName = getTypeSpelling(ty);
        std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());

        // Remove access qualifiers on images
        // (as they are inseparable from type in clang implementation,
        // but OpenCL spec provides a special query to get access qualifier
        // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
        if (ty->isImageType()) {
          removeImageAccessQualifier(typeName);
          removeImageAccessQualifier(baseTypeName);
        }

        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        if (isPipe)
          typeQuals = "pipe";
      }
      argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
    }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(VMContext, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(VMContext, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(VMContext, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(VMContext, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(VMContext, argTypeQuals));
  // Argument names are only emitted when -cl-kernel-arg-info is requested.
  if (getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(VMContext, argNames));
}
1661
1662/// Determines whether the language options require us to model
1663/// unwind exceptions. We treat -fexceptions as mandating this
1664/// except under the fragile ObjC ABI with only ObjC exceptions
1665/// enabled. This means, for example, that C with -fexceptions
1666/// enables this.
1667static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1668 // If exceptions are completely disabled, obviously this is false.
1669 if (!LangOpts.Exceptions) return false;
1670
1671 // If C++ exceptions are enabled, this is true.
1672 if (LangOpts.CXXExceptions) return true;
1673
1674 // If ObjC exceptions are enabled, this depends on the ABI.
1675 if (LangOpts.ObjCExceptions) {
1676 return LangOpts.ObjCRuntime.hasUnwindExceptions();
1677 }
1678
1679 return true;
1680}
1681
1682static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
1683 const CXXMethodDecl *MD) {
1684 // Check that the type metadata can ever actually be used by a call.
1685 if (!CGM.getCodeGenOpts().LTOUnit ||
1686 !CGM.HasHiddenLTOVisibility(MD->getParent()))
1687 return false;
1688
1689 // Only functions whose address can be taken with a member function pointer
1690 // need this sort of type metadata.
1691 return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1692 !isa<CXXDestructorDecl>(MD);
1693}
1694
1695std::vector<const CXXRecordDecl *>
1696CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
1697 llvm::SetVector<const CXXRecordDecl *> MostBases;
1698
1699 std::function<void (const CXXRecordDecl *)> CollectMostBases;
1700 CollectMostBases = [&](const CXXRecordDecl *RD) {
1701 if (RD->getNumBases() == 0)
1702 MostBases.insert(RD);
1703 for (const CXXBaseSpecifier &B : RD->bases())
1704 CollectMostBases(B.getType()->getAsCXXRecordDecl());
1705 };
1706 CollectMostBases(RD);
1707 return MostBases.takeVector();
1708}
1709
/// Set the LLVM function attributes that are only meaningful on function
/// *definitions*: unwind/stack-protection state, the inlining-control
/// precedence chain (optnone > naked > noduplicate > noinline > always_inline
/// > inline hints), cold/hot/minsize, alignment, and CFI type metadata.
/// \p D is the source-level declaration driving the definition and may be
/// null (e.g. for compiler-synthesized functions).
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
                                                           llvm::Function *F) {
  llvm::AttrBuilder B;

  // -funwind-tables: request unwind table emission.
  if (CodeGenOpts.UnwindTables)
    B.addAttribute(llvm::Attribute::UWTable);

  // -fstack-clash-protection is implemented with inline-asm stack probes.
  if (CodeGenOpts.StackClashProtector)
    B.addAttribute("probe-stack", "inline-asm");

  // If language semantics guarantee no unwinding, mark the function nounwind.
  if (!hasUnwindExceptions(LangOpts))
    B.addAttribute(llvm::Attribute::NoUnwind);

  // Apply the requested stack-protector level unless the declaration opts out
  // via __attribute__((no_stack_protector)).
  if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
    if (LangOpts.getStackProtector() == LangOptions::SSPOn)
      B.addAttribute(llvm::Attribute::StackProtect);
    else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
      B.addAttribute(llvm::Attribute::StackProtectStrong);
    else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
      B.addAttribute(llvm::Attribute::StackProtectReq);
  }

  if (!D) {
    // If we don't have a declaration to control inlining, the function isn't
    // explicitly marked as alwaysinline for semantic reasons, and inlining is
    // disabled, mark the function as noinline.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
        CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
      B.addAttribute(llvm::Attribute::NoInline);

    // No declaration-driven attributes apply; commit what we have and stop.
    F->addAttributes(llvm::AttributeList::FunctionIndex, B);
    return;
  }

  // Track whether we need to add the optnone LLVM attribute,
  // starting with the default for this optimization level.
  bool ShouldAddOptNone =
      !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
  // We can't add optnone in the following cases, it won't pass the verifier.
  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();

  // Add optnone, but do so only if the function isn't always_inline.
  if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
      !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    B.addAttribute(llvm::Attribute::OptimizeNone);

    // OptimizeNone implies noinline; we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::NoInline);

    // We still need to handle naked functions even though optnone subsumes
    // much of their semantics.
    if (D->hasAttr<NakedAttr>())
      B.addAttribute(llvm::Attribute::Naked);

    // OptimizeNone wins over OptimizeForSize and MinSize.
    F->removeFnAttr(llvm::Attribute::OptimizeForSize);
    F->removeFnAttr(llvm::Attribute::MinSize);
  } else if (D->hasAttr<NakedAttr>()) {
    // Naked implies noinline: we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::Naked);
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<NoDuplicateAttr>()) {
    B.addAttribute(llvm::Attribute::NoDuplicate);
  } else if (D->hasAttr<NoInlineAttr>() && !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    // Add noinline if the function isn't always_inline.
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<AlwaysInlineAttr>() &&
             !F->hasFnAttribute(llvm::Attribute::NoInline)) {
    // (noinline wins over always_inline, and we can't specify both in IR)
    B.addAttribute(llvm::Attribute::AlwaysInline);
  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
    // If we're not inlining, then force everything that isn't always_inline to
    // carry an explicit noinline attribute.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
      B.addAttribute(llvm::Attribute::NoInline);
  } else {
    // Otherwise, propagate the inline hint attribute and potentially use its
    // absence to mark things as noinline.
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Search function and template pattern redeclarations for inline.
      auto CheckForInline = [](const FunctionDecl *FD) {
        auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
          return Redecl->isInlineSpecified();
        };
        if (any_of(FD->redecls(), CheckRedeclForInline))
          return true;
        // Also look at the template pattern the instantiation came from.
        const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
        if (!Pattern)
          return false;
        return any_of(Pattern->redecls(), CheckRedeclForInline);
      };
      if (CheckForInline(FD)) {
        B.addAttribute(llvm::Attribute::InlineHint);
      } else if (CodeGenOpts.getInlining() ==
                     CodeGenOptions::OnlyHintInlining &&
                 !FD->isInlined() &&
                 !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
        B.addAttribute(llvm::Attribute::NoInline);
      }
    }
  }

  // Add other optimization related attributes if we are optimizing this
  // function.
  if (!D->hasAttr<OptimizeNoneAttr>()) {
    if (D->hasAttr<ColdAttr>()) {
      if (!ShouldAddOptNone)
        B.addAttribute(llvm::Attribute::OptimizeForSize);
      B.addAttribute(llvm::Attribute::Cold);
    }
    if (D->hasAttr<HotAttr>())
      B.addAttribute(llvm::Attribute::Hot);
    if (D->hasAttr<MinSizeAttr>())
      B.addAttribute(llvm::Attribute::MinSize);
  }

  F->addAttributes(llvm::AttributeList::FunctionIndex, B);

  // Honor an explicit alignment from the declaration, converted from bits
  // to char units.
  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
  if (alignment)
    F->setAlignment(llvm::Align(alignment));

  // Apply the global function-alignment option (stored as a log2 value)
  // unless the declaration carries its own aligned attribute.
  if (!D->hasAttr<AlignedAttr>())
    if (LangOpts.FunctionAlignment)
      F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));

  // Some C++ ABIs require 2-byte alignment for member functions, in order to
  // reserve a bit for differentiating between virtual and non-virtual member
  // functions. If the current target's C++ ABI requires this and this is a
  // member function, set its alignment accordingly.
  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
    if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
      F->setAlignment(llvm::Align(2));
  }

  // In the cross-dso CFI mode with canonical jump tables, we want !type
  // attributes on definitions only.
  if (CodeGenOpts.SanitizeCfiCrossDso &&
      CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Skip available_externally functions. They won't be codegen'ed in the
      // current module anyway.
      if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
        CreateFunctionTypeMetadataForIcall(FD, F);
    }
  }

  // Emit type metadata on member functions for member function pointer checks.
  // These are only ever necessary on definitions; we're guaranteed that the
  // definition will be present in the LTO unit as a result of LTO visibility.
  auto *MD = dyn_cast<CXXMethodDecl>(D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(Context.getMemberPointerType(
              MD->getType(), Context.getRecordType(Base).getTypePtr()));
      F->addTypeMetadata(0, Id);
    }
  }
}
1871
1872void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
1873 llvm::Function *F) {
1874 if (D->hasAttr<StrictFPAttr>()) {
1875 llvm::AttrBuilder FuncAttrs;
1876 FuncAttrs.addAttribute("strictfp");
1877 F->addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1878 }
1879}
1880
1881void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
1882 const Decl *D = GD.getDecl();
1883 if (dyn_cast_or_null<NamedDecl>(D))
1884 setGVProperties(GV, GD);
1885 else
1886 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1887
1888 if (D && D->hasAttr<UsedAttr>())
1889 addUsedOrCompilerUsedGlobal(GV);
1890
1891 if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
1892 const auto *VD = cast<VarDecl>(D);
1893 if (VD->getType().isConstQualified() &&
1894 VD->getStorageDuration() == SD_Static)
1895 addUsedOrCompilerUsedGlobal(GV);
1896 }
1897}
1898
1899bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
1900 llvm::AttrBuilder &Attrs) {
1901 // Add target-cpu and target-features attributes to functions. If
1902 // we have a decl for the function and it has a target attribute then
1903 // parse that and add it to the feature set.
1904 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1905 StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
1906 std::vector<std::string> Features;
1907 const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
1908 FD = FD ? FD->getMostRecentDecl() : FD;
1909 const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
1910 const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
1911 bool AddedAttr = false;
1912 if (TD || SD) {
1913 llvm::StringMap<bool> FeatureMap;
1914 getContext().getFunctionFeatureMap(FeatureMap, GD);
1915
1916 // Produce the canonical string for this set of features.
1917 for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
1918 Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
1919
1920 // Now add the target-cpu and target-features to the function.
1921 // While we populated the feature map above, we still need to
1922 // get and parse the target attribute so we can get the cpu for
1923 // the function.
1924 if (TD) {
1925 ParsedTargetAttr ParsedAttr = TD->parse();
1926 if (!ParsedAttr.Architecture.empty() &&
1927 getTarget().isValidCPUName(ParsedAttr.Architecture)) {
1928 TargetCPU = ParsedAttr.Architecture;
1929 TuneCPU = ""; // Clear the tune CPU.
1930 }
1931 if (!ParsedAttr.Tune.empty() &&
1932 getTarget().isValidCPUName(ParsedAttr.Tune))
1933 TuneCPU = ParsedAttr.Tune;
1934 }
1935 } else {
1936 // Otherwise just add the existing target cpu and target features to the
1937 // function.
1938 Features = getTarget().getTargetOpts().Features;
1939 }
1940
1941 if (!TargetCPU.empty()) {
1942 Attrs.addAttribute("target-cpu", TargetCPU);
1943 AddedAttr = true;
1944 }
1945 if (!TuneCPU.empty()) {
1946 Attrs.addAttribute("tune-cpu", TuneCPU);
1947 AddedAttr = true;
1948 }
1949 if (!Features.empty()) {
1950 llvm::sort(Features);
1951 Attrs.addAttribute("target-features", llvm::join(Features, ","));
1952 AddedAttr = true;
1953 }
1954
1955 return AddedAttr;
1956}
1957
/// Apply attributes for non-alias global objects (functions and variables):
/// the common GV handling, retain/used, #pragma clang section placements,
/// per-function CPU/feature attributes, code_seg/section selection, and
/// finally any target-specific attribute hooks.
void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
                                          llvm::GlobalObject *GO) {
  const Decl *D = GD.getDecl();
  SetCommonAttributes(GD, GO);

  if (D) {
    if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
      // __attribute__((retain)) forces the variable to be kept.
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(GV);
      // #pragma clang section placements for each data section kind.
      if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
        GV->addAttribute("bss-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
        GV->addAttribute("data-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
        GV->addAttribute("rodata-section", SA->getName());
      if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
        GV->addAttribute("relro-section", SA->getName());
    }

    if (auto *F = dyn_cast<llvm::Function>(GO)) {
      // __attribute__((retain)) forces the function to be kept.
      if (D->hasAttr<RetainAttr>())
        addUsedGlobal(F);
      // #pragma clang section text only applies when no explicit section
      // attribute overrides it.
      if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
        if (!D->getAttr<SectionAttr>())
          F->addFnAttr("implicit-section-name", SA->getName());

      llvm::AttrBuilder Attrs;
      if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
        // We know that GetCPUAndFeaturesAttributes will always have the
        // newest set, since it has the newest possible FunctionDecl, so the
        // new ones should replace the old.
        llvm::AttrBuilder RemoveAttrs;
        RemoveAttrs.addAttribute("target-cpu");
        RemoveAttrs.addAttribute("target-features");
        RemoveAttrs.addAttribute("tune-cpu");
        F->removeAttributes(llvm::AttributeList::FunctionIndex, RemoveAttrs);
        F->addAttributes(llvm::AttributeList::FunctionIndex, Attrs);
      }
    }

    // code_seg takes precedence over a plain section attribute.
    if (const auto *CSA = D->getAttr<CodeSegAttr>())
      GO->setSection(CSA->getName());
    else if (const auto *SA = D->getAttr<SectionAttr>())
      GO->setSection(SA->getName());
  }

  // Let the target adjust attributes last, so it sees everything above.
  getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
}
2006
/// Set attributes for a function emitted with internal linkage: the usual
/// declaration-level and definition-level attribute sets, forced internal
/// linkage, and the shared non-alias object attributes.
void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
                                                  llvm::Function *F,
                                                  const CGFunctionInfo &FI) {
  const Decl *D = GD.getDecl();
  // ABI/calling-convention attributes from the function info.
  SetLLVMFunctionAttributes(GD, FI, F);
  // Definition-only attributes (inlining controls, optnone, alignment, ...).
  SetLLVMFunctionAttributesForDefinition(D, F);

  F->setLinkage(llvm::Function::InternalLinkage);

  setNonAliasAttributes(GD, F);
}
2018
2019static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
2020 // Set linkage and visibility in case we never see a definition.
2021 LinkageInfo LV = ND->getLinkageAndVisibility();
2022 // Don't set internal linkage on declarations.
2023 // "extern_weak" is overloaded in LLVM; we probably should have
2024 // separate linkage types for this.
2025 if (isExternallyVisible(LV.getLinkage()) &&
2026 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
2027 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
2028}
2029
2030void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
2031 llvm::Function *F) {
2032 // Only if we are checking indirect calls.
2033 if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2034 return;
2035
2036 // Non-static class methods are handled via vtable or member function pointer
2037 // checks elsewhere.
2038 if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2039 return;
2040
2041 llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
2042 F->addTypeMetadata(0, MD);
2043 F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2044
2045 // Emit a hash-based bit set entry for cross-DSO calls.
2046 if (CodeGenOpts.SanitizeCfiCrossDso)
2047 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
2048 F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
2049}
2050
2051void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
2052 bool IsIncompleteFunction,
2053 bool IsThunk) {
2054
2055 if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
2056 // If this is an intrinsic function, set the function's attributes
2057 // to the intrinsic's attributes.
2058 F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
2059 return;
2060 }
2061
2062 const auto *FD = cast<FunctionDecl>(GD.getDecl());
2063
2064 if (!IsIncompleteFunction)
2065 SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
2066
2067 // Add the Returned attribute for "this", except for iOS 5 and earlier
2068 // where substantial code, including the libstdc++ dylib, was compiled with
2069 // GCC and does not actually return "this".
2070 if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
2071 !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
2072 assert(!F->arg_empty() &&
2073 F->arg_begin()->getType()
2074 ->canLosslesslyBitCastTo(F->getReturnType()) &&
2075 "unexpected this return");
2076 F->addAttribute(1, llvm::Attribute::Returned);
2077 }
2078
2079 // Only a few attributes are set on declarations; these may later be
2080 // overridden by a definition.
2081
2082 setLinkageForGV(F, FD);
2083 setGVProperties(F, FD);
2084
2085 // Setup target-specific attributes.
2086 if (!IsIncompleteFunction && F->isDeclaration())
2087 getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);
2088
2089 if (const auto *CSA = FD->getAttr<CodeSegAttr>())
2090 F->setSection(CSA->getName());
2091 else if (const auto *SA = FD->getAttr<SectionAttr>())
2092 F->setSection(SA->getName());
2093
2094 // If we plan on emitting this inline builtin, we can't treat it as a builtin.
2095 if (FD->isInlineBuiltinDeclaration()) {
2096 const FunctionDecl *FDBody;
2097 bool HasBody = FD->hasBody(FDBody);
2098 (void)HasBody;
2099 assert(HasBody && "Inline builtin declarations should always have an "
2100 "available body!");
2101 if (shouldEmitFunction(FDBody))
2102 F->addAttribute(llvm::AttributeList::FunctionIndex,
2103 llvm::Attribute::NoBuiltin);
2104 }
2105
2106 if (FD->isReplaceableGlobalAllocationFunction()) {
2107 // A replaceable global allocation function does not act like a builtin by
2108 // default, only if it is invoked by a new-expression or delete-expression.
2109 F->addAttribute(llvm::AttributeList::FunctionIndex,
2110 llvm::Attribute::NoBuiltin);
2111 }
2112
2113 if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
2114 F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2115 else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
2116 if (MD->isVirtual())
2117