1//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This provides C++ code generation targeting the Itanium C++ ABI. The class
10// in this file generates structures that follow the Itanium C++ ABI, which is
11// documented at:
12// https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13// https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14//
15// It also supports the closely-related ARM ABI, documented at:
16// https://developer.arm.com/documentation/ihi0041/g/
17//
18//===----------------------------------------------------------------------===//
19
20#include "CGCXXABI.h"
21#include "CGCleanup.h"
22#include "CGRecordLayout.h"
23#include "CGVTables.h"
24#include "CodeGenFunction.h"
25#include "CodeGenModule.h"
26#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/Mangle.h"
29#include "clang/AST/StmtCXX.h"
30#include "clang/AST/Type.h"
31#include "clang/CodeGen/ConstantInitBuilder.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/GlobalValue.h"
34#include "llvm/IR/Instructions.h"
35#include "llvm/IR/Intrinsics.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/ScopedPrinter.h"
38
39using namespace clang;
40using namespace CodeGen;
41
42namespace {
43class ItaniumCXXABI : public CodeGen::CGCXXABI {
44 /// VTables - All the vtables which have been defined.
45 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46
47 /// All the thread wrapper functions that have been used.
48 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49 ThreadWrappers;
50
51protected:
52 bool UseARMMethodPtrABI;
53 bool UseARMGuardVarABI;
54 bool Use32BitVTableOffsetABI;
55
56 ItaniumMangleContext &getMangleContext() {
57 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58 }
59
60public:
61 ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62 bool UseARMMethodPtrABI = false,
63 bool UseARMGuardVarABI = false) :
64 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65 UseARMGuardVarABI(UseARMGuardVarABI),
66 Use32BitVTableOffsetABI(false) { }
67
68 bool classifyReturnType(CGFunctionInfo &FI) const override;
69
70 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71 // If C++ prohibits us from making a copy, pass by address.
72 if (!RD->canPassInRegisters())
73 return RAA_Indirect;
74 return RAA_Default;
75 }
76
77 bool isThisCompleteObject(GlobalDecl GD) const override {
78 // The Itanium ABI has separate complete-object vs. base-object
79 // variants of both constructors and destructors.
80 if (isa<CXXDestructorDecl>(GD.getDecl())) {
81 switch (GD.getDtorType()) {
82 case Dtor_Complete:
83 case Dtor_Deleting:
84 return true;
85
86 case Dtor_Base:
87 return false;
88
89 case Dtor_Comdat:
90 llvm_unreachable("emitting dtor comdat as function?");
91 }
92 llvm_unreachable("bad dtor kind");
93 }
94 if (isa<CXXConstructorDecl>(GD.getDecl())) {
95 switch (GD.getCtorType()) {
96 case Ctor_Complete:
97 return true;
98
99 case Ctor_Base:
100 return false;
101
102 case Ctor_CopyingClosure:
103 case Ctor_DefaultClosure:
104 llvm_unreachable("closure ctors in Itanium ABI?");
105
106 case Ctor_Comdat:
107 llvm_unreachable("emitting ctor comdat as function?");
108 }
109 llvm_unreachable("bad dtor kind");
110 }
111
112 // No other kinds.
113 return false;
114 }
115
116 bool isZeroInitializable(const MemberPointerType *MPT) override;
117
118 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119
120 CGCallee
121 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122 const Expr *E,
123 Address This,
124 llvm::Value *&ThisPtrForCall,
125 llvm::Value *MemFnPtr,
126 const MemberPointerType *MPT) override;
127
128 llvm::Value *
129 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130 Address Base,
131 llvm::Value *MemPtr,
132 const MemberPointerType *MPT) override;
133
134 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135 const CastExpr *E,
136 llvm::Value *Src) override;
137 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138 llvm::Constant *Src) override;
139
140 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141
142 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144 CharUnits offset) override;
145 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147 CharUnits ThisAdjustment);
148
149 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150 llvm::Value *L, llvm::Value *R,
151 const MemberPointerType *MPT,
152 bool Inequality) override;
153
154 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155 llvm::Value *Addr,
156 const MemberPointerType *MPT) override;
157
158 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159 Address Ptr, QualType ElementType,
160 const CXXDestructorDecl *Dtor) override;
161
162 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164
165 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166
167 llvm::CallInst *
168 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169 llvm::Value *Exn) override;
170
171 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173 CatchTypeInfo
174 getAddrOfCXXCatchHandlerType(QualType Ty,
175 QualType CatchHandlerType) override {
176 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177 }
178
179 bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182 Address ThisPtr,
183 llvm::Type *StdTypeInfoPtrTy) override;
184
185 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186 QualType SrcRecordTy) override;
187
188 llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189 QualType SrcRecordTy, QualType DestTy,
190 QualType DestRecordTy,
191 llvm::BasicBlock *CastEnd) override;
192
193 llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194 QualType SrcRecordTy,
195 QualType DestTy) override;
196
197 bool EmitBadCastCall(CodeGenFunction &CGF) override;
198
199 llvm::Value *
200 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201 const CXXRecordDecl *ClassDecl,
202 const CXXRecordDecl *BaseClassDecl) override;
203
204 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205
206 AddedStructorArgCounts
207 buildStructorSignature(GlobalDecl GD,
208 SmallVectorImpl<CanQualType> &ArgTys) override;
209
210 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211 CXXDtorType DT) const override {
212 // Itanium does not emit any destructor variant as an inline thunk.
213 // Delegating may occur as an optimization, but all variants are either
214 // emitted with external linkage or as linkonce if they are inline and used.
215 return false;
216 }
217
218 void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219
220 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221 FunctionArgList &Params) override;
222
223 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224
225 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226 const CXXConstructorDecl *D,
227 CXXCtorType Type,
228 bool ForVirtualBase,
229 bool Delegating) override;
230
231 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232 const CXXDestructorDecl *DD,
233 CXXDtorType Type,
234 bool ForVirtualBase,
235 bool Delegating) override;
236
237 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238 CXXDtorType Type, bool ForVirtualBase,
239 bool Delegating, Address This,
240 QualType ThisTy) override;
241
242 void emitVTableDefinitions(CodeGenVTables &CGVT,
243 const CXXRecordDecl *RD) override;
244
245 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246 CodeGenFunction::VPtr Vptr) override;
247
248 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249 return true;
250 }
251
252 llvm::Constant *
253 getVTableAddressPoint(BaseSubobject Base,
254 const CXXRecordDecl *VTableClass) override;
255
256 llvm::Value *getVTableAddressPointInStructor(
257 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259
260 llvm::Value *getVTableAddressPointInStructorWithVTT(
261 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262 BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263
264 llvm::Constant *
265 getVTableAddressPointForConstExpr(BaseSubobject Base,
266 const CXXRecordDecl *VTableClass) override;
267
268 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269 CharUnits VPtrOffset) override;
270
271 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272 Address This, llvm::Type *Ty,
273 SourceLocation Loc) override;
274
275 llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276 const CXXDestructorDecl *Dtor,
277 CXXDtorType DtorType, Address This,
278 DeleteOrMemberCallExpr E) override;
279
280 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281
282 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284
285 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286 bool ReturnAdjustment) override {
287 // Allow inlining of thunks by emitting them with available_externally
288 // linkage together with vtables when needed.
289 if (ForVTable && !Thunk->hasLocalLinkage())
290 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291 CGM.setGVProperties(Thunk, GD);
292 }
293
294 bool exportThunk() override { return true; }
295
296 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297 const ThisAdjustment &TA) override;
298
299 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300 const ReturnAdjustment &RA) override;
301
302 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303 FunctionArgList &Args) const override {
304 assert(!Args.empty() && "expected the arglist to not be empty!");
305 return Args.size() - 1;
306 }
307
308 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309 StringRef GetDeletedVirtualCallName() override
310 { return "__cxa_deleted_virtual"; }
311
312 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313 Address InitializeArrayCookie(CodeGenFunction &CGF,
314 Address NewPtr,
315 llvm::Value *NumElements,
316 const CXXNewExpr *expr,
317 QualType ElementType) override;
318 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319 Address allocPtr,
320 CharUnits cookieSize) override;
321
322 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323 llvm::GlobalVariable *DeclPtr,
324 bool PerformInit) override;
325 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326 llvm::FunctionCallee dtor,
327 llvm::Constant *addr) override;
328
329 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330 llvm::Value *Val);
331 void EmitThreadLocalInitFuncs(
332 CodeGenModule &CGM,
333 ArrayRef<const VarDecl *> CXXThreadLocals,
334 ArrayRef<llvm::Function *> CXXThreadLocalInits,
335 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336
337 /// Determine whether we will definitely emit this variable with a constant
338 /// initializer, either because the language semantics demand it or because
339 /// we know that the initializer is a constant.
340 bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
341 VD = VD->getMostRecentDecl();
342 if (VD->hasAttr<ConstInitAttr>())
343 return true;
344
345 // All later checks examine the initializer specified on the variable. If
346 // the variable is weak, such examination would not be correct.
347 if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
348 return false;
349
350 const VarDecl *InitDecl = VD->getInitializingDeclaration();
351 if (!InitDecl)
352 return false;
353
354 // If there's no initializer to run, this is constant initialization.
355 if (!InitDecl->hasInit())
356 return true;
357
358 // If we have the only definition, we don't need a thread wrapper if we
359 // will emit the value as a constant.
360 if (isUniqueGVALinkage(getContext().GetGVALinkageForVariable(VD)))
361 return !VD->needsDestruction(getContext()) && InitDecl->evaluateValue();
362
363 // Otherwise, we need a thread wrapper unless we know that every
364 // translation unit will emit the value as a constant. We rely on the
365 // variable being constant-initialized in every translation unit if it's
366 // constant-initialized in any translation unit, which isn't actually
367 // guaranteed by the standard but is necessary for sanity.
368 return InitDecl->hasConstantInitialization();
369 }
370
371 bool usesThreadWrapperFunction(const VarDecl *VD) const override {
372 return !isEmittedWithConstantInitializer(VD) ||
373 VD->needsDestruction(getContext());
374 }
375 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
376 QualType LValType) override;
377
378 bool NeedsVTTParameter(GlobalDecl GD) override;
379
380 /**************************** RTTI Uniqueness ******************************/
381
382protected:
383 /// Returns true if the ABI requires RTTI type_info objects to be unique
384 /// across a program.
385 virtual bool shouldRTTIBeUnique() const { return true; }
386
387public:
388 /// What sort of unique-RTTI behavior should we use?
389 enum RTTIUniquenessKind {
390 /// We are guaranteeing, or need to guarantee, that the RTTI string
391 /// is unique.
392 RUK_Unique,
393
394 /// We are not guaranteeing uniqueness for the RTTI string, so we
395 /// can demote to hidden visibility but must use string comparisons.
396 RUK_NonUniqueHidden,
397
398 /// We are not guaranteeing uniqueness for the RTTI string, so we
399 /// have to use string comparisons, but we also have to emit it with
400 /// non-hidden visibility.
401 RUK_NonUniqueVisible
402 };
403
404 /// Return the required visibility status for the given type and linkage in
405 /// the current ABI.
406 RTTIUniquenessKind
407 classifyRTTIUniqueness(QualType CanTy,
408 llvm::GlobalValue::LinkageTypes Linkage) const;
409 friend class ItaniumRTTIBuilder;
410
411 void emitCXXStructor(GlobalDecl GD) override;
412
413 std::pair<llvm::Value *, const CXXRecordDecl *>
414 LoadVTablePtr(CodeGenFunction &CGF, Address This,
415 const CXXRecordDecl *RD) override;
416
417 private:
418 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
419 const auto &VtableLayout =
420 CGM.getItaniumVTableContext().getVTableLayout(RD);
421
422 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
423 // Skip empty slot.
424 if (!VtableComponent.isUsedFunctionPointerKind())
425 continue;
426
427 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
428 if (!Method->getCanonicalDecl()->isInlined())
429 continue;
430
431 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
432 auto *Entry = CGM.GetGlobalValue(Name);
433 // This checks if virtual inline function has already been emitted.
434 // Note that it is possible that this inline function would be emitted
435 // after trying to emit vtable speculatively. Because of this we do
436 // an extra pass after emitting all deferred vtables to find and emit
437 // these vtables opportunistically.
438 if (!Entry || Entry->isDeclaration())
439 return true;
440 }
441 return false;
442 }
443
444 bool isVTableHidden(const CXXRecordDecl *RD) const {
445 const auto &VtableLayout =
446 CGM.getItaniumVTableContext().getVTableLayout(RD);
447
448 for (const auto &VtableComponent : VtableLayout.vtable_components()) {
449 if (VtableComponent.isRTTIKind()) {
450 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
451 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
452 return true;
453 } else if (VtableComponent.isUsedFunctionPointerKind()) {
454 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
455 if (Method->getVisibility() == Visibility::HiddenVisibility &&
456 !Method->isDefined())
457 return true;
458 }
459 }
460 return false;
461 }
462};
463
464class ARMCXXABI : public ItaniumCXXABI {
465public:
466 ARMCXXABI(CodeGen::CodeGenModule &CGM) :
467 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
468 /*UseARMGuardVarABI=*/true) {}
469
470 bool HasThisReturn(GlobalDecl GD) const override {
471 return (isa<CXXConstructorDecl>(GD.getDecl()) || (
472 isa<CXXDestructorDecl>(GD.getDecl()) &&
473 GD.getDtorType() != Dtor_Deleting));
474 }
475
476 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
477 QualType ResTy) override;
478
479 CharUnits getArrayCookieSizeImpl(QualType elementType) override;
480 Address InitializeArrayCookie(CodeGenFunction &CGF,
481 Address NewPtr,
482 llvm::Value *NumElements,
483 const CXXNewExpr *expr,
484 QualType ElementType) override;
485 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
486 CharUnits cookieSize) override;
487};
488
489class AppleARM64CXXABI : public ARMCXXABI {
490public:
491 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
492 Use32BitVTableOffsetABI = true;
493 }
494
495 // ARM64 libraries are prepared for non-unique RTTI.
496 bool shouldRTTIBeUnique() const override { return false; }
497};
498
499class FuchsiaCXXABI final : public ItaniumCXXABI {
500public:
501 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
502 : ItaniumCXXABI(CGM) {}
503
504private:
505 bool HasThisReturn(GlobalDecl GD) const override {
506 return isa<CXXConstructorDecl>(GD.getDecl()) ||
507 (isa<CXXDestructorDecl>(GD.getDecl()) &&
508 GD.getDtorType() != Dtor_Deleting);
509 }
510};
511
512class WebAssemblyCXXABI final : public ItaniumCXXABI {
513public:
514 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
515 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
516 /*UseARMGuardVarABI=*/true) {}
517 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
518 llvm::CallInst *
519 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
520 llvm::Value *Exn) override;
521
522private:
523 bool HasThisReturn(GlobalDecl GD) const override {
524 return isa<CXXConstructorDecl>(GD.getDecl()) ||
525 (isa<CXXDestructorDecl>(GD.getDecl()) &&
526 GD.getDtorType() != Dtor_Deleting);
527 }
528 bool canCallMismatchedFunctionType() const override { return false; }
529};
530
531class XLCXXABI final : public ItaniumCXXABI {
532public:
533 explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
534 : ItaniumCXXABI(CGM) {}
535
536 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
537 llvm::FunctionCallee dtor,
538 llvm::Constant *addr) override;
539
540 bool useSinitAndSterm() const override { return true; }
541
542private:
543 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
544 llvm::Constant *addr);
545};
546}
547
548CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
549 switch (CGM.getContext().getCXXABIKind()) {
550 // For IR-generation purposes, there's no significant difference
551 // between the ARM and iOS ABIs.
552 case TargetCXXABI::GenericARM:
553 case TargetCXXABI::iOS:
554 case TargetCXXABI::WatchOS:
555 return new ARMCXXABI(CGM);
556
557 case TargetCXXABI::AppleARM64:
558 return new AppleARM64CXXABI(CGM);
559
560 case TargetCXXABI::Fuchsia:
561 return new FuchsiaCXXABI(CGM);
562
563 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
564 // include the other 32-bit ARM oddities: constructor/destructor return values
565 // and array cookies.
566 case TargetCXXABI::GenericAArch64:
567 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
568 /*UseARMGuardVarABI=*/true);
569
570 case TargetCXXABI::GenericMIPS:
571 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
572
573 case TargetCXXABI::WebAssembly:
574 return new WebAssemblyCXXABI(CGM);
575
576 case TargetCXXABI::XL:
577 return new XLCXXABI(CGM);
578
579 case TargetCXXABI::GenericItanium:
580 if (CGM.getContext().getTargetInfo().getTriple().getArch()
581 == llvm::Triple::le32) {
582 // For PNaCl, use ARM-style method pointers so that PNaCl code
583 // does not assume anything about the alignment of function
584 // pointers.
585 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
586 }
587 return new ItaniumCXXABI(CGM);
588
589 case TargetCXXABI::Microsoft:
590 llvm_unreachable("Microsoft ABI is not Itanium-based");
591 }
592 llvm_unreachable("bad ABI kind");
593}
594
595llvm::Type *
596ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
597 if (MPT->isMemberDataPointer())
598 return CGM.PtrDiffTy;
599 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
600}
601
602/// In the Itanium and ARM ABIs, method pointers have the form:
603/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
604///
605/// In the Itanium ABI:
606/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
607/// - the this-adjustment is (memptr.adj)
608/// - the virtual offset is (memptr.ptr - 1)
609///
610/// In the ARM ABI:
611/// - method pointers are virtual if (memptr.adj & 1) is nonzero
612/// - the this-adjustment is (memptr.adj >> 1)
613/// - the virtual offset is (memptr.ptr)
614/// ARM uses 'adj' for the virtual flag because Thumb functions
615/// may be only single-byte aligned.
616///
617/// If the member is virtual, the adjusted 'this' pointer points
618/// to a vtable pointer from which the virtual offset is applied.
619///
620/// If the member is non-virtual, memptr.ptr is the address of
621/// the function to call.
622CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
623 CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
624 llvm::Value *&ThisPtrForCall,
625 llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
626 CGBuilderTy &Builder = CGF.Builder;
627
628 const FunctionProtoType *FPT =
629 MPT->getPointeeType()->getAs<FunctionProtoType>();
630 auto *RD =
631 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
632
633 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
634 CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
635
636 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
637
638 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
639 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
640 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
641
642 // Extract memptr.adj, which is in the second field.
643 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
644
645 // Compute the true adjustment.
646 llvm::Value *Adj = RawAdj;
647 if (UseARMMethodPtrABI)
648 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
649
650 // Apply the adjustment and cast back to the original struct type
651 // for consistency.
652 llvm::Value *This = ThisAddr.getPointer();
653 llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
654 Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
655 This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
656 ThisPtrForCall = This;
657
658 // Load the function pointer.
659 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
660
661 // If the LSB in the function pointer is 1, the function pointer points to
662 // a virtual function.
663 llvm::Value *IsVirtual;
664 if (UseARMMethodPtrABI)
665 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
666 else
667 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
668 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
669 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
670
671 // In the virtual path, the adjustment left 'This' pointing to the
672 // vtable of the correct base subobject. The "function pointer" is an
673 // offset within the vtable (+1 for the virtual flag on non-ARM).
674 CGF.EmitBlock(FnVirtual);
675
676 // Cast the adjusted this to a pointer to vtable pointer and load.
677 llvm::Type *VTableTy = Builder.getInt8PtrTy();
678 CharUnits VTablePtrAlign =
679 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
680 CGF.getPointerAlign());
681 llvm::Value *VTable =
682 CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
683
684 // Apply the offset.
685 // On ARM64, to reserve extra space in virtual member function pointers,
686 // we only pay attention to the low 32 bits of the offset.
687 llvm::Value *VTableOffset = FnAsInt;
688 if (!UseARMMethodPtrABI)
689 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
690 if (Use32BitVTableOffsetABI) {
691 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
692 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
693 }
694
695 // Check the address of the function pointer if CFI on member function
696 // pointers is enabled.
697 llvm::Constant *CheckSourceLocation;
698 llvm::Constant *CheckTypeDesc;
699 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
700 CGM.HasHiddenLTOVisibility(RD);
701 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
702 CGM.HasHiddenLTOVisibility(RD);
703 bool ShouldEmitWPDInfo =
704 CGM.getCodeGenOpts().WholeProgramVTables &&
705 // Don't insert type tests if we are forcing public std visibility.
706 !CGM.HasLTOVisibilityPublicStd(RD);
707 llvm::Value *VirtualFn = nullptr;
708
709 {
710 CodeGenFunction::SanitizerScope SanScope(&CGF);
711 llvm::Value *TypeId = nullptr;
712 llvm::Value *CheckResult = nullptr;
713
714 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
715 // If doing CFI, VFE or WPD, we will need the metadata node to check
716 // against.
717 llvm::Metadata *MD =
718 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
719 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
720 }
721
722 if (ShouldEmitVFEInfo) {
723 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
724
725 // If doing VFE, load from the vtable with a type.checked.load intrinsic
726 // call. Note that we use the GEP to calculate the address to load from
727 // and pass 0 as the offset to the intrinsic. This is because every
728 // vtable slot of the correct type is marked with matching metadata, and
729 // we know that the load must be from one of these slots.
730 llvm::Value *CheckedLoad = Builder.CreateCall(
731 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
732 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
733 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
734 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
735 VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
736 "memptr.virtualfn");
737 } else {
738 // When not doing VFE, emit a normal load, as it allows more
739 // optimisations than type.checked.load.
740 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
741 llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
742 CheckResult = Builder.CreateCall(
743 CGM.getIntrinsic(llvm::Intrinsic::type_test),
744 {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
745 }
746
747 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
748 VirtualFn = CGF.Builder.CreateCall(
749 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
750 {VTableOffset->getType()}),
751 {VTable, VTableOffset});
752 VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
753 } else {
754 llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
755 VFPAddr = CGF.Builder.CreateBitCast(
756 VFPAddr, FTy->getPointerTo()->getPointerTo());
757 VirtualFn = CGF.Builder.CreateAlignedLoad(
758 FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
759 "memptr.virtualfn");
760 }
761 }
762 assert(VirtualFn && "Virtual fuction pointer not created!");
763 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
764 CheckResult) &&
765 "Check result required but not created!");
766
767 if (ShouldEmitCFICheck) {
768 // If doing CFI, emit the check.
769 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
770 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
771 llvm::Constant *StaticData[] = {
772 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
773 CheckSourceLocation,
774 CheckTypeDesc,
775 };
776
777 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
778 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
779 } else {
780 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
781 CGM.getLLVMContext(),
782 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
783 llvm::Value *ValidVtable = Builder.CreateCall(
784 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
785 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
786 SanitizerHandler::CFICheckFail, StaticData,
787 {VTable, ValidVtable});
788 }
789
790 FnVirtual = Builder.GetInsertBlock();
791 }
792 } // End of sanitizer scope
793
794 CGF.EmitBranch(FnEnd);
795
796 // In the non-virtual path, the function pointer is actually a
797 // function pointer.
798 CGF.EmitBlock(FnNonVirtual);
799 llvm::Value *NonVirtualFn =
800 Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
801
802 // Check the function pointer if CFI on member function pointers is enabled.
803 if (ShouldEmitCFICheck) {
804 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
805 if (RD->hasDefinition()) {
806 CodeGenFunction::SanitizerScope SanScope(&CGF);
807
808 llvm::Constant *StaticData[] = {
809 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
810 CheckSourceLocation,
811 CheckTypeDesc,
812 };
813
814 llvm::Value *Bit = Builder.getFalse();
815 llvm::Value *CastedNonVirtualFn =
816 Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
817 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
818 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
819 getContext().getMemberPointerType(
820 MPT->getPointeeType(),
821 getContext().getRecordType(Base).getTypePtr()));
822 llvm::Value *TypeId =
823 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
824
825 llvm::Value *TypeTest =
826 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
827 {CastedNonVirtualFn, TypeId});
828 Bit = Builder.CreateOr(Bit, TypeTest);
829 }
830
831 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
832 SanitizerHandler::CFICheckFail, StaticData,
833 {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
834
835 FnNonVirtual = Builder.GetInsertBlock();
836 }
837 }
838
839 // We're done.
840 CGF.EmitBlock(FnEnd);
841 llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
842 CalleePtr->addIncoming(VirtualFn, FnVirtual);
843 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
844
845 CGCallee Callee(FPT, CalleePtr);
846 return Callee;
847}
848
849/// Compute an l-value by applying the given pointer-to-member to a
850/// base object.
llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
    CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
    const MemberPointerType *MPT) {
  // Under Itanium, a data member pointer is simply a byte offset from the
  // object's base address, represented as a ptrdiff_t.
  assert(MemPtr->getType() == CGM.PtrDiffTy);

  CGBuilderTy &Builder = CGF.Builder;

  // Cast to char*.
  Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);

  // Apply the offset, which we assume is non-null.
  llvm::Value *Addr = Builder.CreateInBoundsGEP(
      Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");

  // Cast the address to the appropriate pointer type, adopting the
  // address space of the base pointer.
  llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
                            ->getPointerTo(Base.getAddressSpace());
  return Builder.CreateBitCast(Addr, PType);
}
871
872/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
873/// conversion.
874///
875/// Bitcast conversions are always a no-op under Itanium.
876///
877/// Obligatory offset/adjustment diagram:
878/// <-- offset --> <-- adjustment -->
879/// |--------------------------|----------------------|--------------------|
880/// ^Derived address point ^Base address point ^Member address point
881///
882/// So when converting a base member pointer to a derived member pointer,
883/// we add the offset to the adjustment because the address point has
884/// decreased; and conversely, when converting a derived MP to a base MP
885/// we subtract the offset from the adjustment because the address point
886/// has increased.
887///
888/// The standard forbids (at compile time) conversion to and from
889/// virtual bases, which is why we don't have to consider them here.
890///
891/// The standard forbids (at run time) casting a derived MP to a base
892/// MP when the derived MP does not point to a member of the base.
893/// This is why -1 is a reasonable choice for null data member
894/// pointers.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
                                           const CastExpr *E,
                                           llvm::Value *src) {
  assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
         E->getCastKind() == CK_BaseToDerivedMemberPointer ||
         E->getCastKind() == CK_ReinterpretMemberPointer);

  // Under Itanium, reinterprets don't require any additional processing.
  if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;

  // Use constant emission if we can.
  if (isa<llvm::Constant>(src))
    return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));

  // A null adjustment means the cast is a no-op at runtime.
  llvm::Constant *adj = getMemberPointerAdjustment(E);
  if (!adj) return src;

  CGBuilderTy &Builder = CGF.Builder;
  bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);

  const MemberPointerType *destTy =
    E->getType()->castAs<MemberPointerType>();

  // For member data pointers, this is just a matter of adding the
  // offset if the source is non-null.
  if (destTy->isMemberDataPointer()) {
    llvm::Value *dst;
    if (isDerivedToBase)
      dst = Builder.CreateNSWSub(src, adj, "adj");
    else
      dst = Builder.CreateNSWAdd(src, adj, "adj");

    // Null check: the null data member pointer (-1) must be preserved,
    // not have the adjustment applied to it.
    llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
    llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
    return Builder.CreateSelect(isNull, src, dst);
  }

  // The this-adjustment is left-shifted by 1 on ARM.
  if (UseARMMethodPtrABI) {
    uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
    offset <<= 1;
    adj = llvm::ConstantInt::get(adj->getType(), offset);
  }

  // For member function pointers, only the 'adj' field (index 1) of the
  // {ptr, adj} pair changes; 'ptr' is unaffected by the conversion.
  llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
  llvm::Value *dstAdj;
  if (isDerivedToBase)
    dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
  else
    dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");

  return Builder.CreateInsertValue(src, dstAdj, 1);
}
950
951llvm::Constant *
952ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
953 llvm::Constant *src) {
954 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
955 E->getCastKind() == CK_BaseToDerivedMemberPointer ||
956 E->getCastKind() == CK_ReinterpretMemberPointer);
957
958 // Under Itanium, reinterprets don't require any additional processing.
959 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
960
961 // If the adjustment is trivial, we don't need to do anything.
962 llvm::Constant *adj = getMemberPointerAdjustment(E);
963 if (!adj) return src;
964
965 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
966
967 const MemberPointerType *destTy =
968 E->getType()->castAs<MemberPointerType>();
969
970 // For member data pointers, this is just a matter of adding the
971 // offset if the source is non-null.
972 if (destTy->isMemberDataPointer()) {
973 // null maps to null.
974 if (src->isAllOnesValue()) return src;
975
976 if (isDerivedToBase)
977 return llvm::ConstantExpr::getNSWSub(src, adj);
978 else
979 return llvm::ConstantExpr::getNSWAdd(src, adj);
980 }
981
982 // The this-adjustment is left-shifted by 1 on ARM.
983 if (UseARMMethodPtrABI) {
984 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
985 offset <<= 1;
986 adj = llvm::ConstantInt::get(adj->getType(), offset);
987 }
988
989 llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
990 llvm::Constant *dstAdj;
991 if (isDerivedToBase)
992 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
993 else
994 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
995
996 return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
997}
998
999llvm::Constant *
1000ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
1001 // Itanium C++ ABI 2.3:
1002 // A NULL pointer is represented as -1.
1003 if (MPT->isMemberDataPointer())
1004 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1005
1006 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1007 llvm::Constant *Values[2] = { Zero, Zero };
1008 return llvm::ConstantStruct::getAnon(Values);
1009}
1010
1011llvm::Constant *
1012ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1013 CharUnits offset) {
1014 // Itanium C++ ABI 2.3:
1015 // A pointer to data member is an offset from the base address of
1016 // the class object containing it, represented as a ptrdiff_t
1017 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1018}
1019
1020llvm::Constant *
1021ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1022 return BuildMemberPointer(MD, CharUnits::Zero());
1023}
1024
llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
                                                  CharUnits ThisAdjustment) {
  assert(MD->isInstance() && "Member function must not be static!");

  CodeGenTypes &Types = CGM.getTypes();

  // A member function pointer is a {ptr, adj} pair; fill in both fields.
  // Get the function pointer (or index if this is a virtual function).
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
    uint64_t VTableOffset;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Multiply by 4-byte relative offsets.
      VTableOffset = Index * 4;
    } else {
      // Conventional layout: each vtable slot is pointer-sized.
      const ASTContext &Context = getContext();
      CharUnits PointerWidth = Context.toCharUnitsFromBits(
          Context.getTargetInfo().getPointerWidth(0));
      VTableOffset = Index * PointerWidth.getQuantity();
    }

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    // Non-virtual: 'ptr' holds the function address itself. Under the ARM
    // ABI the this-adjustment is doubled (its low bit is the virtual flag).
    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}
1087
1088llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1089 QualType MPType) {
1090 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1091 const ValueDecl *MPD = MP.getMemberPointerDecl();
1092 if (!MPD)
1093 return EmitNullMemberPointer(MPT);
1094
1095 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1096
1097 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1098 return BuildMemberPointer(MD, ThisAdjustment);
1099
1100 CharUnits FieldOffset =
1101 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1102 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1103}
1104
/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  // Select the predicates/operators for equality; for inequality we use
  // the De Morgan duals (NE, with And/Or swapped).
  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr.  This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null.  ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj.  If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}
1182
llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  // For member data pointers, this is just a check against -1, the null
  // representation (0 is a valid field offset).
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set, since for virtual functions 'ptr' holds a
  // vtable offset that may legitimately be zero.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}
1216
1217bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1218 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1219 if (!RD)
1220 return false;
1221
1222 // If C++ prohibits us from making a copy, return by address.
1223 if (!RD->canPassInRegisters()) {
1224 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1225 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1226 return true;
1227 }
1228 return false;
1229}
1230
1231/// The Itanium ABI requires non-zero initialization only for data
1232/// member pointers, for which '0' is a valid offset.
1233bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
1234 return MPT->isMemberFunctionPointer();
1235}
1236
/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            Address Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    auto *ClassDecl =
        cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
    llvm::Value *VTable =
        CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());

    // Apply the offset.
    llvm::Value *CompletePtr =
        CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
    CompletePtr =
        CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  // When using the global delete, only destroy (complete dtor); otherwise
  // the deleting destructor handles both destruction and deallocation.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);

  // Pop (and emit) the delete cleanup pushed above.
  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}
1280
1281void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1282 // void __cxa_rethrow();
1283
1284 llvm::FunctionType *FTy =
1285 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1286
1287 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1288
1289 if (isNoReturn)
1290 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1291 else
1292 CGF.EmitRuntimeCallOrInvoke(Fn);
1293}
1294
1295static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1296 // void *__cxa_allocate_exception(size_t thrown_size);
1297
1298 llvm::FunctionType *FTy =
1299 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1300
1301 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1302}
1303
1304static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1305 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1306 // void (*dest) (void *));
1307
1308 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1309 llvm::FunctionType *FTy =
1310 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1311
1312 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1313}
1314
void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object via __cxa_allocate_exception, sized
  // for the (unqualified) thrown type.
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  // Construct/copy the thrown value into the freshly allocated storage.
  CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
  CGF.EmitAnyExprToExn(E->getSubExpr(), Address(ExceptionPtr, ExnAlign));

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor.  If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      // The runtime invokes the complete-object destructor when the
      // exception object is destroyed.
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  // __cxa_throw never returns.
  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
}
1348
1349static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1350 // void *__dynamic_cast(const void *sub,
1351 // const abi::__class_type_info *src,
1352 // const abi::__class_type_info *dst,
1353 // std::ptrdiff_t src2dst_offset);
1354
1355 llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1356 llvm::Type *PtrDiffTy =
1357 CGF.ConvertType(CGF.getContext().getPointerDiffType());
1358
1359 llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1360
1361 llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1362
1363 // Mark the function as nounwind readonly.
1364 llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1365 llvm::Attribute::ReadOnly };
1366 llvm::AttributeList Attrs = llvm::AttributeList::get(
1367 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1368
1369 return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1370}
1371
1372static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1373 // void __cxa_bad_cast();
1374 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1375 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1376}
1377
/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7].
///
/// Returns the byte offset of Src within Dst when Src is a unique public
/// non-virtual base, or one of the negative sentinels:
///   -1: no hint (a virtual base is involved),
///   -2: Src is not a public base of Dst,
///   -3: Src appears as a public base more than once (never virtually).
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst.  Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.  Only the first public path's
      // offsets are summed; later paths merely bump NumPublicPaths.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
1429
1430static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1431 // void __cxa_bad_typeid();
1432 llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1433
1434 return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1435}
1436
1437bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
1438 QualType SrcRecordTy) {
1439 return IsDeref;
1440}
1441
1442void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1443 llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1444 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1445 Call->setDoesNotReturn();
1446 CGF.Builder.CreateUnreachable();
1447}
1448
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  // The type_info pointer is stored immediately before the vtable's
  // address point, so start from the object's vptr.
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // In the relative layout the slot is a 32-bit self-relative offset at
    // -4 bytes, resolved via the llvm.load.relative intrinsic.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.  In the conventional layout, the slot is the
    // pointer at vtable index -1.
    Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1474
1475bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
1476 QualType SrcRecordTy) {
1477 return SrcIsPtr;
1478}
1479
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // The runtime identifies source and destination by their RTTI descriptors.
  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint (Itanium ABI 2.9.7) that lets the runtime
  // short-circuit the common single-public-base case.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    // Branch: a null runtime result means failure for reference casts.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1522
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  // dynamic_cast<void*> resolves to the most-derived object: read the
  // offset-to-top stored at vtable entry -2 and apply it to 'this'.
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.  Relative layout stores it as
    // a 32-bit value.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable (a ptrdiff_t in the
    // conventional layout).
    OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1560
1561bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1562 llvm::FunctionCallee Fn = getBadCastFn(CGF);
1563 llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1564 Call->setDoesNotReturn();
1565 CGF.Builder.CreateUnreachable();
1566 return true;
1567}
1568
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  // Virtual base offsets live at statically known (negative) offsets from
  // the vtable's address point; look up that slot offset and load it.
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores the offset as a 32-bit integer.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Conventional layout stores it as a ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1597
1598void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1599 // Just make sure we're in sync with TargetCXXABI.
1600 assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1601
1602 // The constructor used for constructing this as a base class;
1603 // ignores virtual bases.
1604 CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1605
1606 // The constructor used for constructing this as a complete class;
1607 // constructs the virtual bases, then calls the base constructor.
1608 if (!D->getParent()->isAbstract()) {
1609 // We don't need to emit the complete ctor if the class is abstract.
1610 CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1611 }
1612}
1613
1614CGCXXABI::AddedStructorArgCounts
1615ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
1616 SmallVectorImpl<CanQualType> &ArgTys) {
1617 ASTContext &Context = getContext();
1618
1619 // All parameters are already in place except VTT, which goes after 'this'.
1620 // These are Clang types, so we don't need to worry about sret yet.
1621
1622 // Check if we need to add a VTT parameter (which has type void **).
1623 if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
1624 : GD.getDtorType() == Dtor_Base) &&
1625 cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
1626 ArgTys.insert(ArgTys.begin() + 1,
1627 Context.getPointerType(Context.VoidPtrTy));
1628 return AddedStructorArgCounts::prefix(1);
1629 }
1630 return AddedStructorArgCounts{};
1631}
1632
1633void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
1634 // The destructor used for destructing this as a base class; ignores
1635 // virtual bases.
1636 CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));
1637
1638 // The destructor used for destructing this as a most-derived class;
1639 // call the base destructor and then destructs any virtual bases.
1640 CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));
1641
1642 // The destructor in a virtual table is always a 'deleting'
1643 // destructor, which calls the complete destructor and then uses the
1644 // appropriate operator delete.
1645 if (D->isVirtual())
1646 CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
1647}
1648
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void** and is passed immediately after 'this'.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    // Remember the decl so the prologue can load the VTT value later.
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1668
1669void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
1670 // Naked functions have no prolog.
1671 if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
1672 return;
1673
1674 /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
1675 /// adjustments are required, because they are all handled by thunks.
1676 setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));
1677
1678 /// Initialize the 'vtt' slot if needed.
1679 if (getStructorImplicitParamDecl(CGF)) {
1680 getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
1681 CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
1682 }
1683
1684 /// If this is a function that the ABI specifies returns 'this', initialize
1685 /// the return slot to 'this' at the start of the function.
1686 ///
1687 /// Unlike the setting of return types, this is done within the ABI
1688 /// implementation instead of by clients of CGCXXABI because:
1689 /// 1) getThisValue is currently protected
1690 /// 2) in theory, an ABI could implement 'this' returns some other way;
1691 /// HasThisReturn only specifies a contract, not the implementation
1692 if (HasThisReturn(CGF.CurGD))
1693 CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
1694}
1695
1696CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1697 CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1698 bool ForVirtualBase, bool Delegating) {
1699 if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1700 return AddedStructorArgs{};
1701
1702 // Insert the implicit 'vtt' argument as the second argument.
1703 llvm::Value *VTT =
1704 CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1705 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1706 return AddedStructorArgs::prefix({{VTT, VTTTy}});
1707}
1708
1709llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1710 CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1711 bool ForVirtualBase, bool Delegating) {
1712 GlobalDecl GD(DD, Type);
1713 return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1714}
1715
1716void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
1717 const CXXDestructorDecl *DD,
1718 CXXDtorType Type, bool ForVirtualBase,
1719 bool Delegating, Address This,
1720 QualType ThisTy) {
1721 GlobalDecl GD(DD, Type);
1722 llvm::Value *VTT =
1723 getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
1724 QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1725
1726 CGCallee Callee;
1727 if (getContext().getLangOpts().AppleKext &&
1728 Type != Dtor_Base && DD->isVirtual())
1729 Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
1730 else
1731 Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);
1732
1733 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
1734 nullptr);
1735}
1736
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  // Emit the vtable (group) for RD, unless it already has an initializer
  // (i.e. it has already been emitted).
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  // Weak-for-linker vtables go in a comdat so duplicate definitions can be
  // folded at link time.
  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // Under the relative vtable layout, generate an alias for vtables that are
  // not dso_local. NOTE(review): presumably this gives relative references a
  // dso_local symbol to target — confirm against GenerateRelativeVTableAlias.
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
1796
1797bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1798 CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1799 if (Vptr.NearestVBase == nullptr)
1800 return false;
1801 return NeedsVTTParameter(CGF.CurGD);
1802}
1803
1804llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1805 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1806 const CXXRecordDecl *NearestVBase) {
1807
1808 if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1809 NeedsVTTParameter(CGF.CurGD)) {
1810 return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1811 NearestVBase);
1812 }
1813 return getVTableAddressPoint(Base, VTableClass);
1814}
1815
1816llvm::Constant *
1817ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
1818 const CXXRecordDecl *VTableClass) {
1819 llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
1820
1821 // Find the appropriate vtable within the vtable group, and the address point
1822 // within that vtable.
1823 VTableLayout::AddressPointLocation AddressPoint =
1824 CGM.getItaniumVTableContext()
1825 .getVTableLayout(VTableClass)
1826 .getAddressPoint(Base);
1827 llvm::Value *Indices[] = {
1828 llvm::ConstantInt::get(CGM.Int32Ty, 0),
1829 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
1830 llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
1831 };
1832
1833 return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
1834 Indices, /*InBounds=*/true,
1835 /*InRangeIndex=*/1);
1836}
1837
1838// Check whether all the non-inline virtual methods for the class have the
1839// specified attribute.
1840template <typename T>
1841static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1842 bool FoundNonInlineVirtualMethodWithAttr = false;
1843 for (const auto *D : RD->noload_decls()) {
1844 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1845 if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1846 FD->doesThisDeclarationHaveABody())
1847 continue;
1848 if (!D->hasAttr<T>())
1849 return false;
1850 FoundNonInlineVirtualMethodWithAttr = true;
1851 }
1852 }
1853
1854 // We didn't find any non-inline virtual methods missing the attribute. We
1855 // will return true when we found at least one non-inline virtual with the
1856 // attribute. (This lets our caller know that the attribute needs to be
1857 // propagated up to the vtable.)
1858 return FoundNonInlineVirtualMethodWithAttr;
1859}
1860
1861llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
1862 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1863 const CXXRecordDecl *NearestVBase) {
1864 assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1865 NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");
1866
1867 // Get the secondary vpointer index.
1868 uint64_t VirtualPointerIndex =
1869 CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
1870
1871 /// Load the VTT.
1872 llvm::Value *VTT = CGF.LoadCXXVTT();
1873 if (VirtualPointerIndex)
1874 VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
1875
1876 // And load the address point from the VTT.
1877 return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
1878 CGF.getPointerAlign());
1879}
1880
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  // In the Itanium ABI the constant-expression address point is the same
  // constant GEP used everywhere else; just forward.
  return getVTableAddressPoint(Base, VTableClass);
}
1885
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  // Get (or create) the global for RD's vtable group. Results are cached in
  // the VTables map; creation registers the vtable for deferred emission.
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  // Mangle the vtable name (Itanium "_ZTV..." scheme).
  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  // (Relative layout stores 32-bit offsets, hence the 32-bit alignment.)
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  // Apply linkage/visibility properties derived from RD.
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1939
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  // Load the virtual function pointer for GD out of This's vtable.
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI: use the type-checked load intrinsic; the offset is the slot
    // index scaled to bytes by the target pointer width.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: slots are 4-byte offsets resolved via the
      // llvm.load.relative intrinsic.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
    } else {
      // Classic layout: index into an array of function pointers and load.
      VTable =
          CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
      llvm::Value *VTableSlotPtr =
          CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(Ty->getPointerTo(), VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1996
1997llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
1998 CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
1999 Address This, DeleteOrMemberCallExpr E) {
2000 auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
2001 auto *D = E.dyn_cast<const CXXDeleteExpr *>();
2002 assert((CE != nullptr) ^ (D != nullptr));
2003 assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
2004 assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);
2005
2006 GlobalDecl GD(Dtor, DtorType);
2007 const CGFunctionInfo *FInfo =
2008 &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
2009 llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
2010 CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);
2011
2012 QualType ThisTy;
2013 if (CE) {
2014 ThisTy = CE->getObjectType();
2015 } else {
2016 ThisTy = D->getDestroyedType();
2017 }
2018
2019 CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
2020 QualType(), nullptr);
2021 return nullptr;
2022}
2023
2024void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2025 CodeGenVTables &VTables = CGM.getVTables();
2026 llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2027 VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2028}
2029
2030bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2031 const CXXRecordDecl *RD) const {
2032 // We don't emit available_externally vtables if we are in -fapple-kext mode
2033 // because kext mode does not permit devirtualization.
2034 if (CGM.getLangOpts().AppleKext)
2035 return false;
2036
2037 // If the vtable is hidden then it is not safe to emit an available_externally
2038 // copy of vtable.
2039 if (isVTableHidden(RD))
2040 return false;
2041
2042 if (CGM.getCodeGenOpts().ForceEmitVTables)
2043 return true;
2044
2045 // If we don't have any not emitted inline virtual function then we are safe
2046 // to emit an available_externally copy of vtable.
2047 // FIXME we can still emit a copy of the vtable if we
2048 // can emit definition of the inline functions.
2049 if (hasAnyUnusedVirtualInlineFunction(RD))
2050 return false;
2051
2052 // For a class with virtual bases, we must also be able to speculatively
2053 // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2054 // the vtable" and "can emit the VTT". For a base subobject, this means we
2055 // need to be able to emit non-virtual base vtables.
2056 if (RD->getNumVBases()) {
2057 for (const auto &B : RD->bases()) {
2058 auto *BRD = B.getType()->getAsCXXRecordDecl();
2059 assert(BRD && "no class for base specifier");
2060 if (B.isVirtual() || !BRD->isDynamicClass())
2061 continue;
2062 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2063 return false;
2064 }
2065 }
2066
2067 return true;
2068}
2069
2070bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2071 if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2072 return false;
2073
2074 // For a complete-object vtable (or more specifically, for the VTT), we need
2075 // to be able to speculatively emit the vtables of all dynamic virtual bases.
2076 for (const auto &B : RD->vbases()) {
2077 auto *BRD = B.getType()->getAsCXXRecordDecl();
2078 assert(BRD && "no class for base specifier");
2079 if (!BRD->isDynamicClass())
2080 continue;
2081 if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2082 return false;
2083 }
2084
2085 return true;
2086}
2087static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
2088 Address InitialPtr,
2089 int64_t NonVirtualAdjustment,
2090 int64_t VirtualAdjustment,
2091 bool IsReturnAdjustment) {
2092 if (!NonVirtualAdjustment && !VirtualAdjustment)
2093 return InitialPtr.getPointer();
2094
2095 Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);
2096
2097 // In a base-to-derived cast, the non-virtual adjustment is applied first.
2098 if (NonVirtualAdjustment && !IsReturnAdjustment) {
2099 V = CGF.Builder.CreateConstInBoundsByteGEP(V,
2100 CharUnits::fromQuantity(NonVirtualAdjustment));
2101 }
2102
2103 // Perform the virtual adjustment if we have one.
2104 llvm::Value *ResultPtr;
2105 if (VirtualAdjustment) {
2106 Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
2107 llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
2108
2109 llvm::Value *Offset;
2110 llvm::Value *OffsetPtr =
2111 CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
2112 if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
2113 // Load the adjustment offset from the vtable as a 32-bit int.
2114 OffsetPtr =
2115 CGF.