//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
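  //
  // For example, assuming a type 'Widget' with a non-trivial destructor, a
  // declaration like
  //   [[clang::no_destroy]] static Widget W;
  // reaches this point with DtorKind == DK_none, so we return early and never
  // reference or register ~Widget for W.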
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
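  //
  // When the destructor can be registered directly, the effect (assuming the
  // Itanium C++ ABI and a variable 's' of type 'S') is roughly equivalent to:
  //   __cxa_atexit(reinterpret_cast<void (*)(void *)>(&S::~S), &s, &__dso_handle);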
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
    // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
               .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                      CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
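  // For a 16-byte object, for instance, the emitted IR is roughly:
  //   call {}* @llvm.invariant.start.p0i8(i64 16, i8* <addr>)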
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = {llvm::ConstantInt::getSigned(Int64Ty, Width),
                          llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
                        "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
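/// Conceptually, the generated stub (sketched here with a hypothetical name)
/// is just:
///   static void __dtor_stub() { dtor(addr); }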
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList(),
                    VD.getLocation(), VD.getInit()->getExprLoc());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(CGF);

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to atexit has a wrong type.");

  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

llvm::Value *
CodeGenFunction::unregisterGlobalDtorWithUnAtExit(llvm::Constant *dtorStub) {
  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // it is removed from the list of functions that are called at normal program
  // termination, and unatexit returns 0; otherwise a non-zero value is
  // returned.
  //
  // extern "C" int unatexit(void (*f)(void));
  assert(dtorStub->getType() ==
             llvm::PointerType::get(
                 llvm::FunctionType::get(CGM.VoidTy, false),
                 dtorStub->getType()->getPointerAddressSpace()) &&
         "Argument to unatexit has a wrong type.");

  llvm::FunctionType *unatexitTy =
      llvm::FunctionType::get(IntTy, {dtorStub->getType()}, /*isVarArg=*/false);

  llvm::FunctionCallee unatexit =
      CGM.CreateRuntimeFunction(unatexitTy, "unatexit", llvm::AttributeList());

  cast<llvm::Function>(unatexit.getCallee())->setDoesNotThrow();

  return EmitNounwindRuntimeCall(unatexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    // 1 / (total number of times we attempt to initialize the variable).
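    // With InitsPerLocalVar, for example, the attached metadata is roughly
    //   !{!"branch_weights", i32 1, i32 1048575}
    // i.e. the init block is taken once out of 1024 * 1024 attempts.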
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}

llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn = llvm::Function::Create(
      FTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());

  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
      !isInNoSanitizeList(SanitizerKind::MemTag, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable. The user has requested that this pointer be emitted in a specific
/// section.
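/// A typical trigger (sketch, MSVC-compatible code) is something like:
///   #pragma init_seg(".CRT$XCU")
///   MyType GlobalVar;   // 'MyType' and 'GlobalVar' are placeholder names
/// which attaches an InitSegAttr naming that section to the variable.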
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in the CUDA 7.5 Programming Guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty, so we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), getTypes().arrangeNullaryFunction(), D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
                                          PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized. On most platforms, this is a
    // minor startup time optimization. In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
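    //
    // For instance, assuming a class template 'template <typename T> struct X'
    // with a static data member 'static Widget SDM;', the implicit
    // instantiation X<int>::SDM has unordered initialization, so its
    // initializer can live in its own llvm.global_ctors entry, keyed on the
    // variable's COMDAT when it is externally visible.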
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
      // In the MS C++ ABI, MSVC adds template static data members to the
      // linker directive; mark the COMDAT key as used so we do the same.
      addUsedGlobal(COMDATKey);
    }
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

static SmallString<128> getTransformedFileName(llvm::Module &M) {
  SmallString<128> FileName = llvm::sys::path::filename(M.getName());

  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
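    // For example, a module named "foo-bar.cpp" becomes "foo_bar.cpp" (the
    // hyphen is replaced, the period is kept).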
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  return FileName;
}

static std::string getPrioritySuffix(unsigned int Priority) {
  assert(Priority <= 65535 && "Priority should always be <= 65535.");

  // Compute the function suffix from priority. Prepend with zeroes to make
  // sure the function names are also ordered as priorities.
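  // For example, priority 101 yields "000101", so "_GLOBAL__I_000101" sorts
  // before "_GLOBAL__I_000200".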
  std::string PrioritySuffix = llvm::utostr(Priority);
  PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;

  return PrioritySuffix;
}

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Everything is sorted first by priority
    // and second by lexical order, so we emit the ctor functions in the proper
    // order.
    for (SmallVectorImpl<GlobalInitData>::iterator
             I = PrioritizedCXXGlobalInits.begin(),
             E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData>::iterator
          PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  if (getCXXABI().useSinitAndSterm() && CXXGlobalInits.empty())
    return;

  // Include the filename in the symbol name. Including "sub_" matches gcc
  // and makes sure these symbols appear lexicographically behind the symbols
  // with priority emitted above.
  llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", getTransformedFileName(getModule())),
      FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);
  // In OpenCL, global init functions must be converted to kernels so that they
  // can be launched from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function makes use of function-pointer
  // callbacks, which we cannot support, especially between host and device.
  // However, global destruction has little meaning without dynamic resource
  // allocation on the device, and program-scope variables are destroyed by the
  // runtime when the program is released.
  if (getLangOpts().OpenCL) {
    GenOpenCLArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  if (getLangOpts().HIP) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  CXXGlobalInits.clear();
}

void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
  if (CXXGlobalDtorsOrStermFinalizers.empty() &&
      PrioritizedCXXStermFinalizers.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global prioritized cleanup function.
  if (!PrioritizedCXXStermFinalizers.empty()) {
    SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
    llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
                         PrioritizedCXXStermFinalizers.end());
    // Iterate over "chunks" of dtors with the same priority and emit each
    // chunk into a separate function. Everything is sorted first by priority
    // and second by lexical order, so we emit the dtor functions in the proper
    // order.
    for (SmallVectorImpl<StermFinalizerData>::iterator
             I = PrioritizedCXXStermFinalizers.begin(),
             E = PrioritizedCXXStermFinalizers.end();
         I != E;) {
      SmallVectorImpl<StermFinalizerData>::iterator PrioE =
          std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());

      LocalCXXStermFinalizers.clear();

      unsigned int Priority = I->first.priority;
      llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
          FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);

      for (; I < PrioE; ++I) {
        llvm::FunctionCallee DtorFn = I->second;
        LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
                                             DtorFn.getCallee(), nullptr);
      }

      CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
          Fn, LocalCXXStermFinalizers);
      AddGlobalDtor(Fn, Priority);
    }
    PrioritizedCXXStermFinalizers.clear();
  }

  if (CXXGlobalDtorsOrStermFinalizers.empty())
    return;

  // Create our global cleanup function.
  llvm::Function *Fn =
      CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
      Fn, CXXGlobalDtorsOrStermFinalizers);
  AddGlobalDtor(Fn);
  CXXGlobalDtorsOrStermFinalizers.clear();
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                       llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList());
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(), 1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
    llvm::Function *Fn,
    ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                        llvm::Constant *>>
        DtorsOrStermFinalizers) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the cleanups, in reverse order from construction.
    for (unsigned i = 0, e = DtorsOrStermFinalizers.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsOrStermFinalizers[e - i - 1];

      llvm::CallInst *CI = nullptr;
      if (Arg == nullptr) {
        assert(
            CGM.getCXXABI().useSinitAndSterm() &&
            "Arg could not be nullptr unless using sinit and sterm functions.");
        CI = Builder.CreateCall(CalleeTy, Callee);
      } else
        CI = Builder.CreateCall(CalleeTy, Callee, Arg);

      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object. The address of the object
/// should be in global memory.
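/// Conceptually, the emitted helper looks roughly like:
///   static void __cxx_global_array_dtor(void *) {
///     <run 'destroyer' over the object at 'addr'>
///   }
/// i.e. a signature suitable for registration via __cxa_atexit or atexit.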
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(GlobalDecl(VD, DynamicInitKind::GlobalArrayDestructor),
                getContext().VoidTy, fn, FI, args);
  // Emit an artificial location for this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}