//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Sema/Sema.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment,
              "Clang max alignment greater than what LLVM supports?");
void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::TopLevelStmt:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
  case Decl::UnresolvedUsingIfExists:
  case Decl::HLSLBuffer:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Record:    // struct/union/class X;
  case Decl::CXXRecord: // struct/union/class X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<RecordDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
    return;
  case Decl::Enum: // enum X;
    if (CGDebugInfo *DI = getDebugInfo())
      if (cast<EnumDecl>(D).getDefinition())
        DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
    return;
  case Decl::Function:     // void X();
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::MSGuid: // __declspec(uuid("..."))
  case Decl::UnnamedGlobalConstant:
  case Decl::TemplateParamObject:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::ImplicitConceptSpecialization:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using: // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingEnum: // using enum X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:     // typedef int X;
  case Decl::TypeAlias: { // using X = int; [C++0x]
    QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType();
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitAndRetainType(Ty);
    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still
  // need to be emitted like static variables, e.g. a function-scope
  // variable in the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
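
// For illustration: given
//   void f(int n) {
//     static int s = 0; // non-automatic storage duration -> EmitStaticVarDecl
//     int a = n;        // automatic storage duration     -> EmitAutoVarDecl
//   }
// 's' takes the static path above and 'a' the automatic one; in OpenCL,
// "__local int l;" would instead go to EmitWorkGroupLocalVarDecl.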

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = std::string(CGM.getMangledName(FD));
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD));
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
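
// For example, "static int x;" inside a C function "f" produces the global
// name "f.x", and inside an Objective-C method the selector spelling is used,
// e.g. "setFoo:.x".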

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = std::string(getMangledName(&D));
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);
  getTargetCodeGenInfo().setTargetAttributes(cast<Decl>(&D), GV, *this);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        llvm::PointerType::get(getLLVMContext(),
                               getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}
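
// The cache above matters in practice: a C++ constructor or destructor body
// can be emitted twice (complete and base variants), so a static local inside
// it would otherwise be created twice; the StaticLocalDeclMap lookup
// guarantees both emissions share a single global.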

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (D.hasFlexibleArrayInit(getContext()))
      CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

#ifndef NDEBUG
  CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
                      D.getFlexibleArrayInitChars(getContext());
  CharUnits CstSize = CharUnits::fromQuantity(
      CGM.getDataLayout().getTypeAllocSize(Init->getType()));
  assert(VarSize == CstSize && "Emitted constant has unexpected size");
#endif

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getValueType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(
        CGM.getModule(), Init->getType(), OldGV->isConstant(),
        OldGV->getLinkage(), Init, "",
        /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(),
        OldGV->getType()->getPointerAddressSpace());
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    OldGV->replaceAllUsesWith(GV);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  bool NeedsDtor =
      D.needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  GV->setConstant(
      D.getType().isConstantStorage(getContext(), true, !NeedsDtor));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (NeedsDtor && HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
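
// The type rewrite above is hit, for instance, by unions: for
//   union U { int i; float f; };
//   static union U u = { .f = 1.0f };
// the global is first created with the memory type chosen for U (laid out
// around 'int'), but the emitted constant wraps a 'float', so the global is
// recreated with the initializer's type and the old one is RAUW'd away.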

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                        llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration. This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  llvm::Type *elemTy = ConvertTypeForMem(D.getType());
  setAddrOfLocalVar(&D, Address(addr, elemTy, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
      cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<RetainAttr>())
    CGM.addUsedGlobal(var);
  else if (D.hasAttr<UsedAttr>())
    CGM.addUsedOrCompilerUsedGlobal(var);

  if (CGM.getCodeGenOpts().KeepPersistentStorageVariables)
    CGM.addUsedOrCompilerUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  llvm::Constant *castedAddr =
      llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobal(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
        : addr(addr), type(type), destroyer(destroyer),
          useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
          flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
            CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    bool isRedundantBeforeReturn() override { return true; }
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      CGF.Builder.CreateStackRestore(V);
    }
  };

  struct KmpcAllocFree final : EHScopeStack::Cleanup {
    std::pair<llvm::Value *, llvm::Value *> AddrSizePair;
    KmpcAllocFree(const std::pair<llvm::Value *, llvm::Value *> &AddrSizePair)
        : AddrSizePair(AddrSizePair) {}
    void Emit(CodeGenFunction &CGF, Flags EmissionFlags) override {
      auto &RT = CGF.CGM.getOpenMPRuntime();
      RT.getKmpcFreeShared(CGF, AddrSizePair);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
        : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      //   void f(void* arg);
      //   __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
          CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
        (var.hasAttr<ObjCPreciseLifetimeAttr>()
             ? CodeGenFunction::destroyARCStrongPrecise
             : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null; as in missing decl or conditional of an if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}
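
// For example, in
//   __weak id x = ^{ use(x); };
// the initializer's block captures 'x', so isAccessedBy() returns true and
// EmitScalarInit() below must zero-initialize 'x' before running the
// initializer.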

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr =
            srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->isLValue()) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->isXValue());
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
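
// For example, given another __weak variable 'w',
//   __weak id y = w;
// hits the CK_LValueToRValue case above: the initialization is emitted as a
// single objc_copyWeak from 'w' to 'y' instead of reading 'w' through the
// weak-load runtime entry points and re-registering the value.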

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability();
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left hand side of the assignment must be nonnull, check that the
  // right hand side is nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
    CodeGenFunction::RunCleanupsScope Scope(*this);
    return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
  }

  // We have to maintain the illusion that the variable is
  // zero-initialized. If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be immediately released.
    [[fallthrough]];
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this. It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier. We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
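
// For example, "int a[100] = {1, 2};" produces a constant aggregate that is
// almost entirely zero; with the six-store budget used by
// shouldUseBZeroPlusStoresToInitialize() below, this returns true after
// counting only the two non-zero elements.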

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder,
                                        bool IsAutoInit) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    auto *I = Builder.CreateStore(Init, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder, IsAutoInit);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder, IsAutoInit);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global. It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // use bzero plus stores if that will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}
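
// Concretely: "int a[100] = {0};" (400 bytes, all zero) always takes the
// bzero path, and "int a[100] = {1, 2};" (larger than 32 bytes, two non-zero
// stores) becomes a bzero followed by two scalar stores rather than a
// 400-byte memcpy from a constant global.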

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}
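
// For example, an "unsigned a[64]" whose every element is initialized to
// 0xABABABAB is byte-wise uniform, so llvm::isBytewiseValue() returns the i8
// constant 0xAB and the caller emits one memset of that byte over the whole
// object.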

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    uint64_t Size = ArrayTy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = ArrayTy->getElementType();
    bool ZeroInitializer = constant->isNullValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size);
    return llvm::ConstantArray::get(NewArrayTy, Values);
  }
  // FIXME: Add handling for tail padding in vectors. Vectors don't
  // have padding between or inside elements, but the total amount of
  // data can be less than the allocated size.
  return constant;
}
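
// For example, for "struct { char c; int i; }" the layout leaves three
// padding bytes after 'c'; constWithPadding() materializes them as an
// explicit [3 x i8] member filled with either zeroes or the pattern byte, so
// that automatic initialization also covers the padding.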

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return std::string(getMangledName(FD));
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, CacheEntry->getValueType(), Align);
}
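
// For example, a local "int arr[4] = {1, 2, 3, 4};" in a C function "f"
// yields a private unnamed_addr constant global named "__const.f.arr" that
// the memcpy-based initialization below reads from; re-emitting 'f' reuses
// the cached global.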

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  return SrcPtr.withElementType(CGM.Int8Ty);
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant, bool IsAutoInit) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    auto *I = Builder.CreateStore(constant, Loc, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0),
                                   SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Loc.withElementType(Ty);
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder,
                                  IsAutoInit);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    auto *I = Builder.CreateMemSet(
        Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile);
    if (IsAutoInit)
      I->addAnnotationMetadata("auto-init");
    return;
  }

  // If the initializer is small or trivialAutoVarInit is set, use a handful of
  // stores.
  bool IsTrivialAutoVarInitPattern =
      CGM.getContext().getLangOpts().getTrivialAutoVarInit() ==
      LangOptions::TrivialAutoVarInitKind::Pattern;
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      if (STy == Loc.getElementType() ||
          (STy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        const llvm::StructLayout *Layout =
            CGM.getDataLayout().getStructLayout(STy);
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          CharUnits CurOff =
              CharUnits::fromQuantity(Layout->getElementOffset(i));
          Address EltPtr = Builder.CreateConstInBoundsByteGEP(
              Loc.withElementType(CGM.Int8Ty), CurOff);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      if (ATy == Loc.getElementType() ||
          (ATy != Loc.getElementType() && IsTrivialAutoVarInitPattern)) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstGEP(
              Loc.withElementType(ATy->getElementType()), i);
          emitStoresForConstant(CGM, D, EltPtr, isVolatile, Builder,
                                constant->getAggregateElement(i), IsAutoInit);
        }
        return;
      }
    }
  }

  // Copy from a global.
  auto *I =
      Builder.CreateMemCpy(Loc,
                           createUnnamedGlobalForMemcpyFrom(
                               CGM, D, Builder, constant, Loc.getAlignment()),
                           SizeVal, isVolatile);
  if (IsAutoInit)
    I->addAnnotationMetadata("auto-init");
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant,
                        /*IsAutoInit=*/true);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}
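
// For example, when pattern-initializing, an aggregate constant such as
// "{ i32 1, i32 undef }" has its undef element rewritten to the i32 pattern
// constant, while constants that contain no undef are returned unchanged.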

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(
      Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}

void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension stores its QualType and corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<const IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      const IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto QT = getContext().getIntTypeForBitwidth(
          SizeTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
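
// For example, for "void f(int a, int b) { int m[a][b]; }" the two
// non-constant dimensions are stored into allocas named "__vla_expr0" and
// "__vla_expr1", and each gets an artificial, implicit VarDecl so that the
// debug info can reference the bound from a DISubrange.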
1439
1440/// EmitAutoVarAlloca - Emit the alloca and debug information for a
1441/// local variable. Does not emit initialization or destruction.
1442CodeGenFunction::AutoVarEmission
1443CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1444 QualType Ty = D.getType();
1445 assert(
1446 Ty.getAddressSpace() == LangAS::Default ||
1447 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1448
1449 AutoVarEmission emission(D);
1450
1451 bool isEscapingByRef = D.isEscapingByref();
1452 emission.IsEscapingByRef = isEscapingByRef;
1453
1454 CharUnits alignment = getContext().getDeclAlign(&D);
1455
1456 // If the type is variably-modified, emit all the VLA sizes for it.
1457 if (Ty->isVariablyModifiedType())
1458 EmitVariablyModifiedType(Ty);
1459
1460 auto *DI = getDebugInfo();
1461 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1462
1463 Address address = Address::invalid();
1464 RawAddress AllocaAddr = RawAddress::invalid();
1465 Address OpenMPLocalAddr = Address::invalid();
1466 if (CGM.getLangOpts().OpenMPIRBuilder)
1467 OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(CGF&: *this, VD: &D);
1468 else
1469 OpenMPLocalAddr =
1470 getLangOpts().OpenMP
1471 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(CGF&: *this, VD: &D)
1472 : Address::invalid();
1473
1474 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1475
1476 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1477 address = OpenMPLocalAddr;
1478 AllocaAddr = OpenMPLocalAddr;
1479 } else if (Ty->isConstantSizeType()) {
1480 // If this value is an array or struct with a statically determinable
1481 // constant initializer, there are optimizations we can do.
1482 //
1483 // TODO: We should constant-evaluate the initializer of any variable,
1484 // as long as it is initialized by a constant expression. Currently,
1485 // isConstantInitializer produces wrong answers for structs with
1486 // reference or bitfield members, and a few other cases, and checking
1487 // for POD-ness protects us from some of these.
1488 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1489 (D.isConstexpr() ||
1490 ((Ty.isPODType(Context: getContext()) ||
1491 getContext().getBaseElementType(QT: Ty)->isObjCObjectPointerType()) &&
1492 D.getInit()->isConstantInitializer(Ctx&: getContext(), ForRef: false)))) {
1493
1494 // If the variable's a const type, and it's neither an NRVO
1495 // candidate nor a __block variable and has no mutable members,
1496 // emit it as a global instead.
1497 // Exception is if a variable is located in non-constant address space
1498 // in OpenCL.
1499 bool NeedsDtor =
1500 D.needsDestruction(Ctx: getContext()) == QualType::DK_cxx_destructor;
1501 if ((!getLangOpts().OpenCL ||
1502 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1503 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1504 !isEscapingByRef &&
1505 Ty.isConstantStorage(Ctx: getContext(), ExcludeCtor: true, ExcludeDtor: !NeedsDtor))) {
1506 EmitStaticVarDecl(D, Linkage: llvm::GlobalValue::InternalLinkage);
1507
1508 // Signal this condition to later callbacks.
1509 emission.Addr = Address::invalid();
1510 assert(emission.wasEmittedAsGlobal());
1511 return emission;
1512 }
1513
1514 // Otherwise, tell the initialization code that we're in this case.
1515 emission.IsConstantAggregate = true;
1516 }
1517
1518 // A normal fixed sized variable becomes an alloca in the entry block,
1519 // unless:
1520 // - it's an NRVO variable.
1521 // - we are compiling OpenMP and it's an OpenMP local variable.
1522 if (NRVO) {
1523 // The named return value optimization: allocate this variable in the
1524 // return slot, so that we can elide the copy when returning this
1525 // variable (C++0x [class.copy]p34).
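      // For example (illustrative only): in 'Foo make() { Foo x; ...; return x; }'
      // 'x' is constructed directly in the return slot, so returning it needs
      // no copy or move.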
      address = ReturnValue;
      AllocaAddr =
          RawAddress(ReturnValue.emitRawPointer(*this),
                     ReturnValue.getElementType(), ReturnValue.getAlignment());

      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
        const auto *RD = RecordTy->getDecl();
        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
            RD->isNonTrivialToPrimitiveDestroy()) {
          // Create a flag that is used to indicate when the NRVO was applied
          // to this variable. Set it to zero to indicate that NRVO was not
          // applied.
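          // (The flag is flipped to true at the point where NRVO actually
          // occurs, and the destructor cleanup consults it; see
          // DestroyNRVOVariableCXX.)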
          llvm::Value *Zero = Builder.getFalse();
          RawAddress NRVOFlag =
              CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
          EnsureInsertPoint();
          Builder.CreateStore(Zero, NRVOFlag);

          // Record the NRVO flag for this variable.
          NRVOFlags[&D] = NRVOFlag.getPointer();
          emission.NRVOFlag = NRVOFlag.getPointer();
        }
      }
    } else {
      CharUnits allocaAlignment;
      llvm::Type *allocaTy;
      if (isEscapingByRef) {
        auto &byrefInfo = getBlockByrefInfo(&D);
        allocaTy = byrefInfo.Type;
        allocaAlignment = byrefInfo.ByrefAlignment;
      } else {
        allocaTy = ConvertTypeForMem(Ty);
        allocaAlignment = alignment;
      }

      // Create the alloca. Note that we set the name separately from
      // building the instruction so that it's there even in no-asserts
      // builds.
      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
                                 /*ArraySize=*/nullptr, &AllocaAddr);

      // Don't emit lifetime markers for MSVC catch parameters. The lifetime
      // of the catch parameter starts in the catchpad instruction, and we
      // can't insert code in those basic blocks.
      bool IsMSCatchParam =
          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();

      // Emit a lifetime intrinsic if meaningful. There's no point in doing
      // this if we don't have a valid insertion point.
      if (HaveInsertPoint() && !IsMSCatchParam) {
        // If there's a jump into the lifetime of this variable, its lifetime
        // gets broken up into several regions in IR, which requires more work
        // to handle correctly. For now, just omit the intrinsics; this is a
        // rare case, and it's better to just be conservatively correct.
        // PR28267.
        //
        // We have to do this in all language modes if there's a jump past the
        // declaration. We also have to do it in C if there's a jump to an
        // earlier point in the current block because non-VLA lifetimes begin
        // as soon as the containing block is entered, not when its variables
        // actually come into scope; suppressing the lifetime annotations
        // completely in this case is unnecessarily pessimistic, but again,
        // this is rare.
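        // For example (illustrative only), markers are omitted for 'x' in:
        //   goto skip;   // jumps past the declaration below
        //   int x = 0;
        //   skip: ...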
        if (!Bypasses.IsBypassed(&D) &&
            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
          llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
          emission.SizeForLifetimeMarkers =
              EmitLifetimeStart(Size, AllocaAddr.getPointer());
        }
      } else {
        assert(!emission.useLifetimeMarkers());
      }
    }
  } else {
    EnsureInsertPoint();

    // Delayed globalization for variable length declarations. This ensures
    // that the expression representing the length has been emitted and can
    // be used by the definition of the VLA. Since this is an escaped
    // declaration, in OpenMP we have to use a call to __kmpc_alloc_shared().
    // The matching deallocation call to __kmpc_free_shared() is emitted
    // later.
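    // For example (illustrative sketch, not literal output): for 'int a[n];'
    // in a target region this emits roughly
    //   %a = call ptr @__kmpc_alloc_shared(i64 %size)
    // and, at the end of the scope,
    //   call void @__kmpc_free_shared(ptr %a, i64 %size)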
    bool VarAllocated = false;
    if (getLangOpts().OpenMPIsTargetDevice) {
      auto &RT = CGM.getOpenMPRuntime();
      if (RT.isDelayedVariableLengthDecl(*this, &D)) {
        // Emit call to __kmpc_alloc_shared() instead of the alloca.
        std::pair<llvm::Value *, llvm::Value *> AddrSizePair =
            RT.getKmpcAllocShared(*this, &D);

        // Save the address of the allocation:
        LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
                                     CGM.getContext().getDeclAlign(&D),
                                     AlignmentSource::Decl);
        address = Base.getAddress(*this);

        // Push a cleanup block to emit the call to __kmpc_free_shared in the
        // appropriate location at the end of the scope of the
        // __kmpc_alloc_shared functions:
        pushKmpcAllocFree(NormalCleanup, AddrSizePair);

        // Mark variable as allocated:
        VarAllocated = true;
      }
    }

    if (!VarAllocated) {
      if (!DidCallStackSave) {
        // Save the stack.
        Address Stack =
            CreateDefaultAlignTempAlloca(AllocaInt8PtrTy, "saved_stack");

        llvm::Value *V = Builder.CreateStackSave();
        assert(V->getType() == AllocaInt8PtrTy);
        Builder.CreateStore(V, Stack);

        DidCallStackSave = true;

        // Push a cleanup block and restore the stack there.
        // FIXME: in general circumstances, this should be an EH cleanup.
        pushStackRestore(NormalCleanup, Stack);
      }

      auto VlaSize = getVLASize(Ty);
      llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);

      // Allocate memory for the array.
      address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
                                 &AllocaAddr);
    }

    // If we have debug info enabled, properly describe the VLA dimensions for
    // this type by registering the vla size expression for each of the
    // dimensions.
    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
  }

  setAddrOfLocalVar(&D, address);
  emission.Addr = address;
  emission.AllocaAddr = AllocaAddr;

  // Emit debug info for local var declaration.
  if (EmitDebugInfo && HaveInsertPoint()) {
    Address DebugAddr = address;
    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
    DI->setLocation(D.getLocation());

    // If NRVO, use a pointer to the return address.
    if (UsePointerValue) {
      DebugAddr = ReturnValuePointer;
      AllocaAddr = ReturnValuePointer;
    }
    (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder,
                                        UsePointerValue);
  }

  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
    EmitVarAnnotations(&D, address.emitRawPointer(*this));

  // Make sure we call @llvm.lifetime.end.
  if (emission.useLifetimeMarkers())
    EHStack.pushCleanup<CallLifetimeEnd>(
        NormalEHLifetimeMarker, emission.getOriginalAllocatedAddress(),
        emission.getSizeForLifetimeMarkers());

  return emission;
}

static bool isCapturedBy(const VarDecl &, const Expr *);

/// Determines whether the given __block variable is potentially
/// captured by the given statement.
static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
  if (const Expr *E = dyn_cast<Expr>(S))
    return isCapturedBy(Var, E);
  for (const Stmt *SubStmt : S->children())
    if (isCapturedBy(Var, SubStmt))
      return true;
  return false;
}

/// Determines whether the given __block variable is potentially
/// captured by the given expression.
static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
  // Skip the most common kinds of expressions that make
  // hierarchy-walking expensive.
  E = E->IgnoreParenCasts();

  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
    const BlockDecl *Block = BE->getBlockDecl();
    for (const auto &I : Block->captures()) {
      if (I.getVariable() == &Var)
        return true;
    }

    // No need to walk into the subexpressions.
    return false;
  }

  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
    const CompoundStmt *CS = SE->getSubStmt();
    for (const auto *BI : CS->body()) {
      if (const auto *BIE = dyn_cast<Expr>(BI)) {
        if (isCapturedBy(Var, BIE))
          return true;
      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
        // Special-case declarations: check their initializers.
        for (const auto *I : DS->decls()) {
          if (const auto *VD = dyn_cast<VarDecl>(I)) {
            const Expr *Init = VD->getInit();
            if (Init && isCapturedBy(Var, Init))
              return true;
          }
        }
      } else {
        // FIXME: Conservatively assume that any other statement causes
        // capturing; that is safe. Later, provide code to poke into
        // statements for capture analysis.
        return true;
      }
    }
    return false;
  }

  for (const Stmt *SubStmt : E->children())
    if (isCapturedBy(Var, SubStmt))
      return true;

  return false;
}

/// Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
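/// For example (illustrative): 'S s;' where 'struct S { int x; };' invokes a
/// trivial default constructor with no zero-initialization, so no code needs
/// to be emitted for it.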
bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
  if (!Init)
    return true;

  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
      if (Constructor->isTrivial() &&
          Constructor->isDefaultConstructor() &&
          !Construct->requiresZeroInitialization())
        return true;

  return false;
}

void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
                                                      const VarDecl &D,
                                                      Address Loc) {
  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
  auto trivialAutoVarInitMaxSize =
      getContext().getLangOpts().TrivialAutoVarInitMaxSize;
  CharUnits Size = getContext().getTypeSizeInChars(type);
  bool isVolatile = type.isVolatileQualified();
  if (!Size.isZero()) {
    // We skip auto-init for variables over the max alloc size. Take this as
    // an example: "struct Foo {int x; char buff[1024];}". Assume the max-size
    // flag is 1023. All variables of type Foo will then be skipped. Ideally,
    // we would only skip the buff array and still auto-init x.
    // TODO: Improve the size filtering to filter by member size.
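    // For example (illustrative): with -ftrivial-auto-var-init-max-size=1023,
    // a local 'struct Foo f;' as above (alloc size 1028) receives no
    // auto-initialization at all.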
    auto allocSize = CGM.getDataLayout().getTypeAllocSize(Loc.getElementType());
    switch (trivialAutoVarInit) {
    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
      llvm_unreachable("Uninitialized handled by caller");
    case LangOptions::TrivialAutoVarInitKind::Zero:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
      break;
    case LangOptions::TrivialAutoVarInitKind::Pattern:
      if (CGM.stopAutoInit())
        return;
      if (trivialAutoVarInitMaxSize > 0 &&
          allocSize > trivialAutoVarInitMaxSize)
        return;
      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
      break;
    }
    return;
  }

  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
  // them, so emit a memcpy with the VLA size to initialize each element.
  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
  // will catch that code, but there exists code which generates zero-sized
  // VLAs. Be nice and initialize whatever they requested.
  const auto *VlaType = getContext().getAsVariableArrayType(type);
  if (!VlaType)
    return;
  auto VlaSize = getVLASize(VlaType);
  auto SizeVal = VlaSize.NumElts;
  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
  switch (trivialAutoVarInit) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    llvm_unreachable("Uninitialized handled by caller");

  case LangOptions::TrivialAutoVarInitKind::Zero: {
    if (CGM.stopAutoInit())
      return;
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0),
                                   SizeVal, isVolatile);
    I->addAnnotationMetadata("auto-init");
    break;
  }

  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    if (CGM.stopAutoInit())
      return;
    llvm::Type *ElTy = Loc.getElementType();
    llvm::Constant *Constant = constWithPadding(
        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
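    // Sketch of the emitted control flow (illustrative):
    //   <current block>: br (NumElts == 0), vla-init.cont, vla-setup.loop
    //   vla-setup.loop:  compute the total byte size and the end pointer
    //   vla-init.loop:   memcpy the pattern into one element, advance, and
    //                    loop until the end pointer is reached
    //   vla-init.cont:   resume normal emission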
    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
        "vla.iszerosized");
    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
    EmitBlock(SetupBB);
    if (!EltSize.isOne())
      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
    llvm::Value *BaseSizeInChars =
        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
    Address Begin = Loc.withElementType(Int8Ty);
    llvm::Value *End = Builder.CreateInBoundsGEP(Begin.getElementType(),
                                                 Begin.emitRawPointer(*this),
                                                 SizeVal, "vla.end");
    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
    EmitBlock(LoopBB);
    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
    Cur->addIncoming(Begin.emitRawPointer(*this), OriginBB);
    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
    auto *I =
        Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
                             createUnnamedGlobalForMemcpyFrom(
                                 CGM, D, Builder, Constant, ConstantAlign),
                             BaseSizeInChars, isVolatile);
    I->addAnnotationMetadata("auto-init");
    llvm::Value *Next =
        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
    Builder.CreateCondBr(Done, ContBB, LoopBB);
    Cur->addIncoming(Next, LoopBB);
    EmitBlock(ContBB);
  } break;
  }
}

void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  const VarDecl &D = *emission.Variable;
  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
  QualType type = D.getType();

  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!Init || !ContainsLabel(Init)) return;
    EnsureInsertPoint();
  }

  // Initialize the structure of a __block variable.
  if (emission.IsEscapingByRef)
    emitByrefStructureInit(emission);

  // Initialize the variable here if it doesn't have an initializer and it is
  // a C struct that is non-trivial to initialize or an array containing such
  // a struct.
  if (!Init &&
      type.isNonTrivialToPrimitiveDefaultInitialize() ==
          QualType::PDIK_Struct) {
    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
    if (emission.IsEscapingByRef)
      drillIntoBlockVariable(*this, Dst, &D);
    defaultInitNonTrivialCStructVar(Dst);
    return;
  }

  // Check whether this is a byref variable that's potentially
  // captured and moved by its own initializer. If so, we'll need to
  // emit the initializer first, then copy into the variable.
  bool capturedByInit =
      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
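  // For example (illustrative): in '__block void (^b)(void) = ^{ b(); };' the
  // initializer captures 'b' itself, so it must be evaluated before the byref
  // slot is written.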

  bool locIsByrefHeader = !capturedByInit;
  const Address Loc =
      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;

  // Note: constexpr already initializes everything correctly.
  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
      (D.isConstexpr()
           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
           : (D.getAttr<UninitializedAttr>()
                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
                  : getContext().getLangOpts().getTrivialAutoVarInit()));

  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
    if (trivialAutoVarInit ==
        LangOptions::TrivialAutoVarInitKind::Uninitialized)
      return;

    // Only initialize a __block's storage: we always initialize the header.
    if (emission.IsEscapingByRef && !locIsByrefHeader)
      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);

    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
  };

  if (isTrivialInitializer(Init))
    return initializeWhatIsTechnicallyUninitialized(Loc);

  llvm::Constant *constant = nullptr;
  if (emission.IsConstantAggregate ||
      D.mightBeUsableInConstantExpressions(getContext())) {
    assert(!capturedByInit && "constant init contains a capturing block?");
    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
    if (constant && !constant->isZeroValue() &&
        (trivialAutoVarInit !=
         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
      IsPattern isPattern =
          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
              ? IsPattern::Yes
              : IsPattern::No;
      // C guarantees that brace-init with fewer initializers than members in
      // the aggregate will initialize the rest of the aggregate as-if it were
      // static initialization. In turn static initialization guarantees that
      // padding is initialized to zero bits. We could instead pattern-init if
      // D has any ImplicitValueInitExpr, but that seems to be unintuitive
      // behavior.
      constant = constWithPadding(CGM, IsPattern::No,
                                  replaceUndef(CGM, isPattern, constant));
    }
  }

  if (!constant) {
    initializeWhatIsTechnicallyUninitialized(Loc);
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitExprAsInit(Init, &D, lv, capturedByInit);
  }

  if (!emission.IsConstantAggregate) {
    // For simple scalar/complex initialization, store the value directly.
    LValue lv = MakeAddrLValue(Loc, type);
    lv.setNonGC(true);
    return EmitStoreThroughLValue(RValue::get(constant), lv, /*isInit=*/true);
  }

  emitStoresForConstant(CGM, D, Loc.withElementType(CGM.Int8Ty),
                        type.isVolatileQualified(), Builder, constant,
                        /*IsAutoInit=*/false);
}

/// Emit an expression as an initializer for an object (variable, field, etc.)
/// at the given location. The expression is not necessarily the normal
/// initializer for the object, and the address is not necessarily
/// its normal location.
///
/// \param init the initializing expression
/// \param D the object to act as if we're initializing
/// \param lvalue the lvalue to initialize
/// \param capturedByInit true if \p D is a __block variable
///   whose address is potentially changed by the initializer
void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  QualType type = D->getType();

  if (type->isReferenceType()) {
    RValue rvalue = EmitReferenceBindingToExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreThroughLValue(rvalue, lvalue, /*isInit=*/true);
    return;
  }
  switch (getEvaluationKind(type)) {
  case TEK_Scalar:
    EmitScalarInit(init, D, lvalue, capturedByInit);
    return;
  case TEK_Complex: {
    ComplexPairTy complex = EmitComplexExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitStoreOfComplex(complex, lvalue, /*isInit=*/true);
    return;
  }
  case TEK_Aggregate:
    if (type->isAtomicType()) {
      EmitAtomicInit(const_cast<Expr *>(init), lvalue);
    } else {
      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
      if (isa<VarDecl>(D))
        Overlap = AggValueSlot::DoesNotOverlap;
      else if (auto *FD = dyn_cast<FieldDecl>(D))
        Overlap = getOverlapForFieldInit(FD);
      // TODO: how can we delay here if D is captured by its initializer?
      EmitAggExpr(init, AggValueSlot::forLValue(
                            lvalue, *this, AggValueSlot::IsDestructed,
                            AggValueSlot::DoesNotNeedGCBarriers,
                            AggValueSlot::IsNotAliased, Overlap));
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Enter a destroy cleanup for the given local variable.
void CodeGenFunction::emitAutoVarTypeCleanup(
    const CodeGenFunction::AutoVarEmission &emission,
    QualType::DestructionKind dtorKind) {
  assert(dtorKind != QualType::DK_none);

  // Note that for __block variables, we want to destroy the
  // original stack object, not the possibly forwarded object.
  Address addr = emission.getObjectAddress(*this);

  const VarDecl *var = emission.Variable;
  QualType type = var->getType();

  CleanupKind cleanupKind = NormalAndEHCleanup;
  CodeGenFunction::Destroyer *destroyer = nullptr;

  switch (dtorKind) {
  case QualType::DK_none:
    llvm_unreachable("no cleanup for trivially-destructible variable");

  case QualType::DK_cxx_destructor:
    // If there's an NRVO flag on the emission, we need a different
    // cleanup.
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type,
                                                  dtor, emission.NRVOFlag);
      return;
    }
    break;

  case QualType::DK_objc_strong_lifetime:
    // Suppress cleanups for pseudo-strong variables.
    if (var->isARCPseudoStrong()) return;

    // Otherwise, consider whether to use an EH cleanup or not.
    cleanupKind = getARCCleanupKind();

    // Use the imprecise destroyer by default.
    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
      destroyer = CodeGenFunction::destroyARCStrongImprecise;
    break;

  case QualType::DK_objc_weak_lifetime:
    break;

  case QualType::DK_nontrivial_c_struct:
    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
    if (emission.NRVOFlag) {
      assert(!type->isArrayType());
      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
                                                emission.NRVOFlag, type);
      return;
    }
    break;
  }

  // If we haven't chosen a more specific destroyer, use the default.
  if (!destroyer) destroyer = getDestroyer(dtorKind);

  // Use an EH cleanup in array destructors iff the destructor itself
  // is being pushed as an EH cleanup.
  bool useEHCleanup = (cleanupKind & EHCleanup);
  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
                                     useEHCleanup);
}

void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
  assert(emission.Variable && "emission was not valid!");

  // If this was emitted as a global constant, we're done.
  if (emission.wasEmittedAsGlobal()) return;

  // If we don't have an insertion point, we're done. Sema prevents
  // us from jumping into any of these scopes anyway.
  if (!HaveInsertPoint()) return;

  const VarDecl &D = *emission.Variable;

  // Check the type for a cleanup.
  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
    emitAutoVarTypeCleanup(emission, dtorKind);

  // In GC mode, honor objc_precise_lifetime.
  if (getLangOpts().getGC() != LangOptions::NonGC &&
      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
  }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
  }
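  // For example (illustrative): for
  //   __attribute__((cleanup(closefd))) int fd = open(...);
  // the cleanup above arranges for 'closefd(&fd)' to run when 'fd' goes out
  // of scope.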

  // If this is a block variable, call _Block_object_destroy
  // (on the unforwarded address). Don't enter this cleanup if we're in
  // pure-GC mode.
  if (emission.IsEscapingByRef &&
      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
    if (emission.Variable->getType().isObjCGCWeak())
      Flags |= BLOCK_FIELD_IS_WEAK;
    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
                      /*LoadBlockVarAddr*/ false,
                      cxxDestructorCanThrow(emission.Variable->getType()));
  }
}

CodeGenFunction::Destroyer *
CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
  switch (kind) {
  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
  case QualType::DK_cxx_destructor:
    return destroyCXXObject;
  case QualType::DK_objc_strong_lifetime:
    return destroyARCStrongPrecise;
  case QualType::DK_objc_weak_lifetime:
    return destroyARCWeak;
  case QualType::DK_nontrivial_c_struct:
    return destroyNonTrivialCStruct;
  }
  llvm_unreachable("Unknown DestructionKind");
}

/// pushEHDestroy - Push the standard destructor for the given type as
/// an EH-only cleanup.
void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
                                    Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");
  assert(needsEHCleanup(dtorKind));

  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind),
              /*useEHCleanupForArray=*/true);
}

/// pushDestroy - Push the standard destructor for the given type as
/// at least a normal cleanup.
void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
                                  Address addr, QualType type) {
  assert(dtorKind && "cannot push destructor for trivial type");

  CleanupKind cleanupKind = getCleanupKind(dtorKind);
  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
              cleanupKind & EHCleanup);
}

void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
                                  QualType type, Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
                                     destroyer, useEHCleanupForArray);
}

void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
}

void CodeGenFunction::pushKmpcAllocFree(
    CleanupKind Kind, std::pair<llvm::Value *, llvm::Value *> AddrSizePair) {
  EHStack.pushCleanup<KmpcAllocFree>(Kind, AddrSizePair);
}

void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind,
                                                  Address addr, QualType type,
                                                  Destroyer *destroyer,
                                                  bool useEHCleanupForArray) {
  // If we're not in a conditional branch, we don't need to bother generating
  // a conditional cleanup.
  if (!isInConditionalBranch()) {
    // Push an EH-only cleanup for the object now.
    // FIXME: When popping normal cleanups, we need to keep this EH cleanup
    // around in case a temporary's destructor throws an exception.
    if (cleanupKind & EHCleanup)
      EHStack.pushCleanup<DestroyObject>(
          static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
          destroyer, useEHCleanupForArray);

    return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>(
        cleanupKind, Address::invalid(), addr, type, destroyer,
        useEHCleanupForArray);
  }

  // Otherwise, we should only destroy the object if it's been initialized.
  // Re-use the active flag and saved address across both the EH and
  // end-of-scope cleanups.

  using SavedType = typename DominatingValue<Address>::saved_type;
  using ConditionalCleanupType =
      EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType,
                                       Destroyer *, bool>;

  Address ActiveFlag = createCleanupActiveFlag();
  SavedType SavedAddr = saveValueInCond(addr);

  if (cleanupKind & EHCleanup) {
    EHStack.pushCleanup<ConditionalCleanupType>(
        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr,
        type, destroyer, useEHCleanupForArray);
    initFullExprCleanupWithFlag(ActiveFlag);
  }

  pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>(
      cleanupKind, ActiveFlag, SavedAddr, type, destroyer,
      useEHCleanupForArray);
}

/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
/// \param addr - the address of the object; a type*
/// \param type - the type of the object; if an array type, all
///   objects are destroyed in reverse order
/// \param destroyer - the function to call to destroy individual
///   elements
/// \param useEHCleanupForArray - whether an EH cleanup should be
///   used when destroying array elements, in case one of the
///   destructions throws an exception
void CodeGenFunction::emitDestroy(Address addr, QualType type,
                                  Destroyer *destroyer,
                                  bool useEHCleanupForArray) {
  const ArrayType *arrayType = getContext().getAsArrayType(type);
  if (!arrayType)
    return destroyer(*this, addr, type);

  llvm::Value *length = emitArrayLength(arrayType, type, addr);

  CharUnits elementAlign =
      addr.getAlignment()
          .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));

  // Normally we have to check whether the array is zero-length.
  bool checkZeroLength = true;

  // But if the array length is constant, we can suppress that.
  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
    // ...and if it's constant zero, we can just skip the entire thing.
    if (constLength->isZero()) return;
    checkZeroLength = false;
  }

  llvm::Value *begin = addr.emitRawPointer(*this);
  llvm::Value *end =
      Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                   checkZeroLength, useEHCleanupForArray);
}

/// emitArrayDestroy - Destroys all the elements of the given array,
/// beginning from last to first. The array cannot be zero-length.
///
/// \param begin - a type* denoting the first element of the array
/// \param end - a type* denoting one past the end of the array
/// \param elementType - the element type of the array
/// \param destroyer - the function to call to destroy elements
/// \param useEHCleanup - whether to push an EH cleanup to destroy
///   the remaining elements in case the destruction of a single
///   element throws
void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
                                       llvm::Value *end,
                                       QualType elementType,
                                       CharUnits elementAlign,
                                       Destroyer *destroyer,
                                       bool checkZeroLength,
                                       bool useEHCleanup) {
  assert(!elementType->isArrayType());

  // The basic structure here is a do-while loop, because we don't
  // need to check for the zero-element case.
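  // Sketch of the emitted loop (illustrative):
  //   entry:             (if checkZeroLength) br (begin == end), done, body
  //   arraydestroy.body: element = elementPast - 1; destroy(element);
  //                      br (element == begin), done, body
  //   arraydestroy.done: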
  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");

  if (checkZeroLength) {
    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
                                                "arraydestroy.isempty");
    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
  }

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
  EmitBlock(bodyBB);
  llvm::PHINode *elementPast =
      Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
  elementPast->addIncoming(end, entryBB);

  // Shift the address back by one element.
  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
  llvm::Type *llvmElementType = ConvertTypeForMem(elementType);
  llvm::Value *element = Builder.CreateInBoundsGEP(
      llvmElementType, elementPast, negativeOne, "arraydestroy.element");

  if (useEHCleanup)
    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
                                   destroyer);

  // Perform the actual destruction there.
  destroyer(*this, Address(element, llvmElementType, elementAlign),
            elementType);

  if (useEHCleanup)
    PopCleanupBlock();

  // Check whether we've reached the end.
  llvm::Value *done = Builder.CreateICmpEQ(element, begin,
                                           "arraydestroy.done");
  Builder.CreateCondBr(done, doneBB, bodyBB);
  elementPast->addIncoming(element, Builder.GetInsertBlock());

  // Done.
  EmitBlock(doneBB);
}

/// Perform partial array destruction as if in an EH cleanup. Unlike
/// emitArrayDestroy, the element type here may still be an array type.
static void emitPartialArrayDestroy(CodeGenFunction &CGF,
                                    llvm::Value *begin, llvm::Value *end,
                                    QualType type, CharUnits elementAlign,
                                    CodeGenFunction::Destroyer *destroyer) {
  llvm::Type *elemTy = CGF.ConvertTypeForMem(type);

  // If the element type is itself an array, drill down.
  unsigned arrayDepth = 0;
  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
    // VLAs don't require a GEP index to walk into.
    if (!isa<VariableArrayType>(arrayType))
      arrayDepth++;
    type = arrayType->getElementType();
  }

  if (arrayDepth) {
    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);

    SmallVector<llvm::Value *, 4> gepIndices(arrayDepth + 1, zero);
    begin = CGF.Builder.CreateInBoundsGEP(
        elemTy, begin, gepIndices, "pad.arraybegin");
    end = CGF.Builder.CreateInBoundsGEP(
        elemTy, end, gepIndices, "pad.arrayend");
  }

  // Destroy the array. We don't ever need an EH cleanup because we
  // assume that we're in an EH cleanup ourselves, so a throwing
  // destructor causes an immediate terminate.
  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
}

namespace {
  /// RegularPartialArrayDestroy - a cleanup which performs a partial
  /// array destroy where the end pointer is regularly determined and
  /// does not need to be loaded from a local.
  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    llvm::Value *ArrayEnd;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
                               QualType elementType, CharUnits elementAlign,
                               CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };

  /// IrregularPartialArrayDestroy - a cleanup which performs a
  /// partial array destroy where the end pointer is irregularly
  /// determined and must be loaded from a local.
  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
    llvm::Value *ArrayBegin;
    Address ArrayEndPointer;
    QualType ElementType;
    CodeGenFunction::Destroyer *Destroyer;
    CharUnits ElementAlign;
  public:
    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
                                 Address arrayEndPointer,
                                 QualType elementType,
                                 CharUnits elementAlign,
                                 CodeGenFunction::Destroyer *destroyer)
        : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
          ElementType(elementType), Destroyer(destroyer),
          ElementAlign(elementAlign) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
                              ElementType, ElementAlign, Destroyer);
    }
  };
} // end anonymous namespace

/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                       Address arrayEndPointer,
                                                       QualType elementType,
                                                       CharUnits elementAlign,
                                                       Destroyer *destroyer) {
  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
                                                    arrayBegin, arrayEndPointer,
                                                    elementType, elementAlign,
                                                    destroyer);
}

/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
/// already-constructed elements of the given array. The cleanup
/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
///
/// \param elementType - the immediate element type of the array;
///   possibly still an array type
void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
                                                     llvm::Value *arrayEnd,
                                                     QualType elementType,
                                                     CharUnits elementAlign,
                                                     Destroyer *destroyer) {
  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
                                                  arrayBegin, arrayEnd,
                                                  elementType, elementAlign,
                                                  destroyer);
}

/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
  if (LifetimeStartFn)
    return LifetimeStartFn;
  LifetimeStartFn = llvm::Intrinsic::getDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
  return LifetimeStartFn;
}

/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
  if (LifetimeEndFn)
    return LifetimeEndFn;
  LifetimeEndFn = llvm::Intrinsic::getDeclaration(
      &getModule(), llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
  return LifetimeEndFn;
}

namespace {
  /// A cleanup to perform a release of an object at the end of a
  /// function. This is used to balance out the incoming +1 of a
  /// ns_consumed argument when we can't reasonably do that just by
  /// not doing the initial retain for a __block argument.
  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
    ConsumeARCParameter(llvm::Value *param,
                        ARCPreciseLifetime_t precise)
        : Param(param), Precise(precise) {}

    llvm::Value *Param;
    ARCPreciseLifetime_t Precise;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitARCRelease(Param, Precise);
    }
  };
} // end anonymous namespace

/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
                                   unsigned ArgNo) {
  bool NoDebugInfo = false;
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");

  // Set the name of the parameter's initial value to make IR easier to
  // read. Don't modify the names of globals.
  if (!isa<llvm::GlobalValue>(Arg.getAnyValue()))
    Arg.getAnyValue()->setName(D.getName());

  QualType Ty = D.getType();

  // Use better IR generation for certain implicit parameters.
  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
    // The only implicit argument a block has is its literal.
    // This may be passed as an inalloca'ed value on Windows x86.
    if (BlockInfo) {
      llvm::Value *V = Arg.isIndirect()
                           ? Builder.CreateLoad(Arg.getIndirectAddress())
                           : Arg.getDirectValue();
      setBlockContextParameter(IPD, ArgNo, V);
      return;
    }
    // Suppress debug info for ThreadPrivateVar parameters; otherwise it
    // hides the debug info of the TLS variables themselves.
    NoDebugInfo =
        (IPD->getParameterKind() == ImplicitParamKind::ThreadPrivateVar);
  }

  Address DeclPtr = Address::invalid();
  RawAddress AllocaPtr = Address::invalid();
  bool DoStore = false;
  bool IsScalar = hasScalarEvaluationKind(Ty);
  bool UseIndirectDebugAddress = false;

  // If we already have a pointer to the argument, reuse the input pointer.
  if (Arg.isIndirect()) {
    DeclPtr = Arg.getIndirectAddress();
    DeclPtr = DeclPtr.withElementType(ConvertTypeForMem(Ty));
    // Indirect argument is in alloca address space, which may be different
    // from the default address space.
    auto AllocaAS = CGM.getASTAllocaAddressSpace();
    auto *V = DeclPtr.emitRawPointer(*this);
    AllocaPtr = RawAddress(V, DeclPtr.getElementType(), DeclPtr.getAlignment());

    // For truly ABI indirect arguments -- those that are not `byval` -- store
    // the address of the argument on the stack to preserve debug information.
    ABIArgInfo ArgInfo = CurFnInfo->arguments()[ArgNo - 1].info;
    if (ArgInfo.isIndirect())
      UseIndirectDebugAddress = !ArgInfo.getIndirectByVal();
    if (UseIndirectDebugAddress) {
      auto PtrTy = getContext().getPointerType(Ty);
      AllocaPtr = CreateMemTemp(PtrTy, getContext().getTypeAlignInChars(PtrTy),
                                D.getName() + ".indirect_addr");
      EmitStoreOfScalar(V, AllocaPtr, /*Volatile=*/false, PtrTy);
    }

    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
    auto DestLangAS =
        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
    if (SrcLangAS != DestLangAS) {
      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
             CGM.getDataLayout().getAllocaAddrSpace());
      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
      auto *T = llvm::PointerType::get(getLLVMContext(), DestAS);
      DeclPtr =
          DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
                                  *this, V, SrcLangAS, DestLangAS, T, true),
                              DeclPtr.isKnownNonNull());
    }

    // Push a destructor cleanup for this parameter if the ABI requires it.
    // Don't push a cleanup in a thunk for a method that will also emit a
    // cleanup.
    if (Ty->isRecordType() && !CurFuncIsThunk &&
        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
      if (QualType::DestructionKind DtorKind =
              D.needsDestruction(getContext())) {
        assert((DtorKind == QualType::DK_cxx_destructor ||
                DtorKind == QualType::DK_nontrivial_c_struct) &&
               "unexpected destructor type");
        pushDestroy(DtorKind, DeclPtr, Ty);
        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
            EHStack.stable_begin();
      }
    }
  } else {
    // Check if the parameter address is controlled by OpenMP runtime.
    Address OpenMPLocalAddr =
        getLangOpts().OpenMP
            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
            : Address::invalid();
    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
      DeclPtr = OpenMPLocalAddr;
      AllocaPtr = DeclPtr;
    } else {
      // Otherwise, create a temporary to hold the value.
      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
                              D.getName() + ".addr", &AllocaPtr);
    }
    DoStore = true;
  }

  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);

  LValue lv = MakeAddrLValue(DeclPtr, Ty);
  if (IsScalar) {
    Qualifiers qs = Ty.getQualifiers();
    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
      // We honor __attribute__((ns_consumed)) for types with lifetime.
      // For __strong, it's handled by just skipping the initial retain;
      // otherwise we have to balance out the initial +1 with an extra
      // cleanup to do the release at the end of the function.
      bool isConsumed = D.hasAttr<NSConsumedAttr>();

      // If a parameter is pseudo-strong then we can omit the implicit retain.
      if (D.isARCPseudoStrong()) {
        assert(lt == Qualifiers::OCL_Strong &&
               "pseudo-strong variable isn't strong?");
        assert(qs.hasConst() && "pseudo-strong variable should be const!");
        lt = Qualifiers::OCL_ExplicitNone;
      }

      // Load objects passed indirectly.
      if (Arg.isIndirect() && !ArgVal)
        ArgVal = Builder.CreateLoad(DeclPtr);

      if (lt == Qualifiers::OCL_Strong) {
        if (!isConsumed) {
          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
            // Use objc_storeStrong(&dest, value) for retaining the
            // object. But first, store a null into 'dest' because
            // objc_storeStrong attempts to release its old value.
            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
            EmitStoreOfScalar(Null, lv, /*isInitialization=*/true);
            EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal,
                                   /*resultIgnored=*/true);
            DoStore = false;
          } else {
            // Don't use objc_retainBlock for block pointers, because we
            // don't want to Block_copy something just because we got it
            // as a parameter.
            ArgVal = EmitARCRetainNonBlock(ArgVal);
          }
        }
      } else {
        // Push the cleanup for a consumed parameter.
        if (isConsumed) {
          ARCPreciseLifetime_t precise =
              (D.hasAttr<ObjCPreciseLifetimeAttr>() ? ARCPreciseLifetime
                                                    : ARCImpreciseLifetime);
          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
                                                   precise);
        }

        if (lt == Qualifiers::OCL_Weak) {
          EmitARCInitWeak(DeclPtr, ArgVal);
          DoStore = false; // The weak init is a store, no need to do two.
        }
      }

      // Enter the cleanup scope.
      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
    }
  }

  // Store the initial value into the alloca.
  if (DoStore)
    EmitStoreOfScalar(ArgVal, lv, /*isInitialization=*/true);

  setAddrOfLocalVar(&D, DeclPtr);

  // Emit debug info for param declarations in non-thunk functions.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
        !NoDebugInfo) {
      llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
          &D, AllocaPtr.getPointer(), ArgNo, Builder, UseIndirectDebugAddress);
      if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
        DI->getParamDbgMappings().insert({Var, DILocalVar});
    }
  }

  if (D.hasAttr<AnnotateAttr>())
    EmitVarAnnotations(&D, DeclPtr.emitRawPointer(*this));

  // We can only check return value nullability if all arguments to the
  // function satisfy their nullability preconditions. This makes it necessary
  // to emit null checks for args in the function body itself.
  if (requiresReturnValueNullabilityCheck()) {
    auto Nullability = Ty->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      SanitizerScope SanScope(this);
      RetValNullabilityPrecondition =
          Builder.CreateAnd(RetValNullabilityPrecondition,
                            Builder.CreateIsNotNull(Arg.getAnyValue()));
    }
  }
}

void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
                                            CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
}

void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
                                         CodeGenFunction *CGF) {
  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
      (!LangOpts.EmitAllDecls && !D->isUsed()))
    return;
  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
}

void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
  getOpenMPRuntime().processRequiresDirective(D);
}

void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
  for (const Expr *E : D->varlists()) {
    const auto *DE = cast<DeclRefExpr>(E);
    const auto *VD = cast<VarDecl>(DE->getDecl());

    // Skip all but globals.
    if (!VD->hasGlobalStorage())
      continue;

    // Check if the global has been materialized yet or not. If not, we are
    // done as any later generation will utilize the OMPAllocateDeclAttr.
    // However, if we already emitted the global we might have done so before
    // the OMPAllocateDeclAttr was attached, leading to the wrong address
    // space (potentially). While not pretty, common practice is to remove
    // the old IR global and generate a new one, so we do that here too. Uses
    // are replaced properly.
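    // For example (illustrative):
    //   int g;
    //   #pragma omp allocate(g) allocator(omp_const_mem_alloc)
    // may require 'g' to be placed in a different address space on the
    // device than the one it was first emitted with.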
    StringRef MangledName = getMangledName(VD);
    llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
    if (!Entry)
      continue;

    // We can also keep the existing global if the address space is what we
    // expect it to be; if not, it is replaced.
    QualType ASTTy = VD->getType();
    clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
    auto TargetAS = getContext().getTargetAddressSpace(GVAS);
    if (Entry->getType()->getAddressSpace() == TargetAS)
      continue;

    // Make a new global with the correct type / address space.
    llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
    llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);

    // Replace all uses of the old global with a cast. Since we mutate the
    // type in place, we need an intermediate that takes the spot of the old
    // entry until we can create the cast.
    llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
        getModule(), Entry->getValueType(), false,
        llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
        llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
    Entry->replaceAllUsesWith(DummyGV);

    Entry->mutateType(PTy);
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
            Entry, DummyGV->getType());

    // Now that we have a casted version of the changed global, the dummy can
    // be replaced and deleted.
    DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
    DummyGV->eraseFromParent();
  }
}

std::optional<CharUnits>
CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
  if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
    if (Expr *Alignment = AA->getAlignment()) {
      unsigned UserAlign =
          Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
      CharUnits NaturalAlign =
          getNaturalTypeAlignment(VD->getType().getNonReferenceType());

      // OpenMP 5.1, pg. 185, lines 7-10:
      // Each item in the align modifier list must be aligned to the maximum
      // of the specified alignment and the type's natural alignment.
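      // For example (illustrative): with '#pragma omp allocate(x) align(64)'
      // on a variable whose natural alignment is 16, the resulting alignment
      // is max(64, 16) == 64.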
      return CharUnits::fromQuantity(
          std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
    }
  }
  return std::nullopt;
}