1 | //===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Builder implementation for CGRecordLayout objects. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "CGRecordLayout.h" |
14 | #include "CGCXXABI.h" |
15 | #include "CodeGenTypes.h" |
16 | #include "clang/AST/ASTContext.h" |
17 | #include "clang/AST/Attr.h" |
18 | #include "clang/AST/CXXInheritance.h" |
19 | #include "clang/AST/DeclCXX.h" |
20 | #include "clang/AST/Expr.h" |
21 | #include "clang/AST/RecordLayout.h" |
22 | #include "clang/Basic/CodeGenOptions.h" |
23 | #include "llvm/IR/DataLayout.h" |
24 | #include "llvm/IR/DerivedTypes.h" |
25 | #include "llvm/IR/Type.h" |
26 | #include "llvm/Support/Debug.h" |
27 | #include "llvm/Support/MathExtras.h" |
28 | #include "llvm/Support/raw_ostream.h" |
29 | using namespace clang; |
30 | using namespace CodeGen; |
31 | |
32 | namespace { |
33 | /// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an |
34 | /// llvm::Type. Some of the lowering is straightforward, some is not. Here we |
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
37 | /// llvm::Type with correct size. We choose a field via a specific heuristic |
38 | /// and add padding if necessary. |
39 | /// * LLVM does not have bitfields - Bitfields are collected into contiguous |
40 | /// runs and allocated as a single storage type for the run. ASTRecordLayout |
41 | /// contains enough information to determine where the runs break. Microsoft |
42 | /// and Itanium follow different rules and use different codepaths. |
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
///   i24. This isn't always possible because i24 has a storage size of 32 bits
///   and if that extra byte of padding can be used by another member we must
///   use [3 x i8] instead of i24. The function clipTailPadding does this.
///   C++ examples that require clipping:
///     struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///     struct A { int a : 24; }; // a must be clipped because a struct like B
///     could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0-sized bitfields and 0-sized bases but *not* zero-sized
///   fields. The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type. Therefore empty structures containing
///   zero-sized subobjects such as empty records or zero-sized arrays still
///   get a zero-sized (empty struct) storage type.
57 | /// * Clang reads the complete type rather than the base type when generating |
58 | /// code to access fields. Bitfields in tail position with tail padding may |
59 | /// be clipped in the base class but not the complete class (we may discover |
60 | /// that the tail padding is not used in the complete class.) However, |
61 | /// because LLVM reads from the complete type it can generate incorrect code |
62 | /// if we do not clip the tail padding off of the bitfield in the complete |
63 | /// layout. This introduces a somewhat awkward extra unnecessary clip stage. |
64 | /// The location of the clip is stored internally as a sentinel of type |
65 | /// SCISSOR. If LLVM were updated to read base types (which it probably |
66 | /// should because locations of things such as VBases are bogus in the llvm |
67 | /// type anyway) then we could eliminate the SCISSOR. |
/// * Itanium allows nearly empty primary virtual bases. These bases don't
///   get their own storage because they're laid out as part of another base
70 | /// or at the beginning of the structure. Determining if a VBase actually |
71 | /// gets storage awkwardly involves a walk of all bases. |
72 | /// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable. |
73 | struct CGRecordLowering { |
74 | // MemberInfo is a helper structure that contains information about a record |
  // member. In addition to the standard member types, there exists a
76 | // sentinel member type that ensures correct rounding. |
77 | struct MemberInfo { |
78 | CharUnits Offset; |
79 | enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind; |
80 | llvm::Type *Data; |
81 | union { |
82 | const FieldDecl *FD; |
83 | const CXXRecordDecl *RD; |
84 | }; |
85 | MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data, |
86 | const FieldDecl *FD = nullptr) |
87 | : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {} |
88 | MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data, |
89 | const CXXRecordDecl *RD) |
90 | : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {} |
91 | // MemberInfos are sorted so we define a < operator. |
92 | bool operator <(const MemberInfo& a) const { return Offset < a.Offset; } |
93 | }; |
94 | // The constructor. |
95 | CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed); |
96 | // Short helper routines. |
97 | /// Constructs a MemberInfo instance from an offset and llvm::Type *. |
98 | MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) { |
99 | return MemberInfo(Offset, MemberInfo::Field, Data); |
100 | } |
101 | |
102 | /// The Microsoft bitfield layout rule allocates discrete storage |
103 | /// units of the field's formal type and only combines adjacent |
104 | /// fields of the same formal type. We want to emit a layout with |
105 | /// these discrete storage units instead of combining them into a |
  /// contiguous run.
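  ///
  /// For example, under the Microsoft rule the two bit-fields in
  ///   struct S { short a : 3; int b : 3; };
  /// get separate storage units (a short and an int), so sizeof(S) == 8,
  /// whereas the Itanium path below packs them into a single run.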
107 | bool isDiscreteBitFieldABI() { |
108 | return Context.getTargetInfo().getCXXABI().isMicrosoft() || |
           D->isMsStruct(Context);
110 | } |
111 | |
112 | /// Helper function to check if we are targeting AAPCS. |
113 | bool isAAPCS() const { |
    return Context.getTargetInfo().getABI().starts_with("aapcs");
115 | } |
116 | |
117 | /// Helper function to check if the target machine is BigEndian. |
118 | bool isBE() const { return Context.getTargetInfo().isBigEndian(); } |
119 | |
120 | /// The Itanium base layout rule allows virtual bases to overlap |
121 | /// other bases, which complicates layout in specific ways. |
122 | /// |
123 | /// Note specifically that the ms_struct attribute doesn't change this. |
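  ///
  /// For example, in 'struct A { virtual void f(); }; struct B : virtual A {};'
  /// the Itanium ABI can make A the primary base of B, laid out at offset 0
  /// and sharing B's vptr rather than occupying separate vbase storage.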
124 | bool isOverlappingVBaseABI() { |
125 | return !Context.getTargetInfo().getCXXABI().isMicrosoft(); |
126 | } |
127 | |
128 | /// Wraps llvm::Type::getIntNTy with some implicit arguments. |
129 | llvm::Type *getIntNType(uint64_t NumBits) { |
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
132 | } |
133 | /// Get the LLVM type sized as one character unit. |
134 | llvm::Type *getCharType() { |
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
137 | } |
138 | /// Gets an llvm type of size NumChars and alignment 1. |
139 | llvm::Type *getByteArrayType(CharUnits NumChars) { |
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
144 | } |
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
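  ///
  /// For example, under the Itanium rule 'unsigned x : 24' gets the storage
  /// type i24 rather than the declared type's i32.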
147 | llvm::Type *getStorageType(const FieldDecl *FD) { |
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
153 | } |
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
155 | llvm::Type *getStorageType(const CXXRecordDecl *RD) { |
156 | return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType(); |
157 | } |
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
  }
167 | bool isZeroInitializable(const FieldDecl *FD) { |
168 | return Types.isZeroInitializable(FD->getType()); |
169 | } |
170 | bool isZeroInitializable(const RecordDecl *RD) { |
171 | return Types.isZeroInitializable(RD); |
172 | } |
173 | void appendPaddingBytes(CharUnits Size) { |
174 | if (!Size.isZero()) |
      FieldTypes.push_back(getByteArrayType(Size));
176 | } |
177 | uint64_t getFieldBitOffset(const FieldDecl *FD) { |
    return Layout.getFieldOffset(FD->getFieldIndex());
179 | } |
180 | // Layout routines. |
181 | void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, |
182 | llvm::Type *StorageType); |
  /// Lowers an ASTRecordLayout to an llvm type.
184 | void lower(bool NonVirtualBaseType); |
185 | void lowerUnion(bool isNoUniqueAddress); |
186 | void accumulateFields(); |
187 | void accumulateBitFields(RecordDecl::field_iterator Field, |
188 | RecordDecl::field_iterator FieldEnd); |
189 | void computeVolatileBitfields(); |
190 | void accumulateBases(); |
191 | void accumulateVPtrs(); |
192 | void accumulateVBases(); |
193 | /// Recursively searches all of the bases to find out if a vbase is |
194 | /// not the primary vbase of some base class. |
195 | bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query); |
196 | void calculateZeroInit(); |
  /// Lowers bitfield storage types to i8 arrays for bitfields whose tail
  /// padding is or might be used by another member.
199 | void clipTailPadding(); |
200 | /// Determines if we need a packed llvm struct. |
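  ///
  /// For example, in 'struct __attribute__((packed)) S { char c; int i; };'
  /// the int lands at offset 1, not a multiple of its alignment, so the llvm
  /// struct must be packed: <{ i8, i32 }>.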
201 | void determinePacked(bool NVBaseType); |
202 | /// Inserts padding everywhere it's needed. |
203 | void insertPadding(); |
204 | /// Fills out the structures that are ultimately consumed. |
205 | void fillOutputFields(); |
206 | // Input memoization fields. |
207 | CodeGenTypes &Types; |
208 | const ASTContext &Context; |
209 | const RecordDecl *D; |
210 | const CXXRecordDecl *RD; |
211 | const ASTRecordLayout &Layout; |
212 | const llvm::DataLayout &DataLayout; |
213 | // Helpful intermediate data-structures. |
214 | std::vector<MemberInfo> Members; |
215 | // Output fields, consumed by CodeGenTypes::ComputeRecordLayout. |
216 | SmallVector<llvm::Type *, 16> FieldTypes; |
217 | llvm::DenseMap<const FieldDecl *, unsigned> Fields; |
218 | llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields; |
219 | llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases; |
220 | llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases; |
221 | bool IsZeroInitializable : 1; |
222 | bool IsZeroInitializableAsBase : 1; |
223 | bool Packed : 1; |
224 | private: |
225 | CGRecordLowering(const CGRecordLowering &) = delete; |
226 | void operator =(const CGRecordLowering &) = delete; |
227 | }; |
} // namespace
229 | |
230 | CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, |
231 | bool Packed) |
232 | : Types(Types), Context(Types.getContext()), D(D), |
      RD(dyn_cast<CXXRecordDecl>(D)),
234 | Layout(Types.getContext().getASTRecordLayout(D)), |
235 | DataLayout(Types.getDataLayout()), IsZeroInitializable(true), |
236 | IsZeroInitializableAsBase(true), Packed(Packed) {} |
237 | |
238 | void CGRecordLowering::setBitFieldInfo( |
239 | const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) { |
240 | CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()]; |
241 | Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType(); |
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
245 | Info.StorageOffset = StartOffset; |
246 | if (Info.Size > Info.StorageSize) |
247 | Info.Size = Info.StorageSize; |
248 | // Reverse the bit offsets for big endian machines. Because we represent |
249 | // a bitfield as a single large integer load, we can imagine the bits |
250 | // counting from the most-significant-bit instead of the |
251 | // least-significant-bit. |
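  // For example, with StorageSize == 32, a field with Offset 0 and Size 8
  // occupies the least significant byte on little-endian targets but the
  // most significant byte on big-endian ones, so its offset becomes
  // 32 - (0 + 8) == 24.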
252 | if (DataLayout.isBigEndian()) |
253 | Info.Offset = Info.StorageSize - (Info.Offset + Info.Size); |
254 | |
255 | Info.VolatileStorageSize = 0; |
256 | Info.VolatileOffset = 0; |
257 | Info.VolatileStorageOffset = CharUnits::Zero(); |
258 | } |
259 | |
260 | void CGRecordLowering::lower(bool NVBaseType) { |
261 | // The lowering process implemented in this function takes a variety of |
262 | // carefully ordered phases. |
263 | // 1) Store all members (fields and bases) in a list and sort them by offset. |
264 | // 2) Add a 1-byte capstone member at the Size of the structure. |
265 | // 3) Clip bitfield storages members if their tail padding is or might be |
266 | // used by another field or base. The clipping process uses the capstone |
267 | // by treating it as another object that occurs after the record. |
268 | // 4) Determine if the llvm-struct requires packing. It's important that this |
269 | // phase occur after clipping, because clipping changes the llvm type. |
270 | // This phase reads the offset of the capstone when determining packedness |
  //    and updates the alignment of the capstone to be equal to the alignment
272 | // of the record after doing so. |
273 | // 5) Insert padding everywhere it is needed. This phase requires 'Packed' to |
274 | // have been computed and needs to know the alignment of the record in |
275 | // order to understand if explicit tail padding is needed. |
276 | // 6) Remove the capstone, we don't need it anymore. |
277 | // 7) Determine if this record can be zero-initialized. This phase could have |
278 | // been placed anywhere after phase 1. |
279 | // 8) Format the complete list of members in a way that can be consumed by |
280 | // CodeGenTypes::ComputeRecordLayout. |
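  //
  // For example, in 'struct { int a : 24; char b; }' the sorted members are
  // a's i24 storage at offset 0, b at offset 3, and the capstone at offset 4;
  // phase 3 sees b land inside the i24's tail padding and rewrites the
  // storage to [3 x i8].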
281 | CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize(); |
282 | if (D->isUnion()) { |
    lowerUnion(NVBaseType);
284 | computeVolatileBitfields(); |
285 | return; |
286 | } |
287 | accumulateFields(); |
288 | // RD implies C++. |
289 | if (RD) { |
290 | accumulateVPtrs(); |
291 | accumulateBases(); |
292 | if (Members.empty()) { |
293 | appendPaddingBytes(Size); |
294 | computeVolatileBitfields(); |
295 | return; |
296 | } |
297 | if (!NVBaseType) |
298 | accumulateVBases(); |
299 | } |
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
302 | clipTailPadding(); |
303 | determinePacked(NVBaseType); |
304 | insertPadding(); |
305 | Members.pop_back(); |
306 | calculateZeroInit(); |
307 | fillOutputFields(); |
308 | computeVolatileBitfields(); |
309 | } |
310 | |
311 | void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) { |
312 | CharUnits LayoutSize = |
313 | isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize(); |
314 | llvm::Type *StorageType = nullptr; |
315 | bool SeenNamedMember = false; |
316 | // Iterate through the fields setting bitFieldInfo and the Fields array. Also |
317 | // locate the "most appropriate" storage type. The heuristic for finding the |
318 | // storage type isn't necessary, the first (non-0-length-bitfield) field's |
319 | // type would work fine and be simpler but would be different than what we've |
320 | // been doing and cause lit tests to change. |
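  //
  // For example, in 'union { char c[5]; int i; }' the int wins on alignment,
  // so the union lowers to { i32, [4 x i8] }.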
321 | for (const auto *Field : D->fields()) { |
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
332 | // Compute zero-initializable status. |
333 | // This union might not be zero initialized: it may contain a pointer to |
334 | // data member which might have some exotic initialization sequence. |
    // If this is the case, then we ought not to try and come up with a "better"
336 | // type, it might not be very easy to come up with a Constant which |
337 | // correctly initializes it. |
338 | if (!SeenNamedMember) { |
339 | SeenNamedMember = Field->getIdentifier(); |
340 | if (!SeenNamedMember) |
341 | if (const auto *FieldRD = Field->getType()->getAsRecordDecl()) |
342 | SeenNamedMember = FieldRD->findFirstNamedDataMember(); |
      if (SeenNamedMember && !isZeroInitializable(Field)) {
344 | IsZeroInitializable = IsZeroInitializableAsBase = false; |
345 | StorageType = FieldType; |
346 | } |
347 | } |
348 | // Because our union isn't zero initializable, we won't be getting a better |
349 | // storage type. |
350 | if (!IsZeroInitializable) |
351 | continue; |
352 | // Conditionally update our storage type if we've got a new "better" one. |
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
         getSize(FieldType) > getSize(StorageType)))
357 | StorageType = FieldType; |
358 | } |
359 | // If we have no storage type just pad to the appropriate size and return. |
360 | if (!StorageType) |
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an i8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
368 | // Set packed if we need it. |
  const auto StorageAlignment = getAlignment(StorageType);
  assert((Layout.getSize() % StorageAlignment == 0 ||
          Layout.getDataSize() % StorageAlignment) &&
         "Union's standard layout and no_unique_address layout must agree on "
         "packedness");
  if (Layout.getDataSize() % StorageAlignment)
    Packed = true;
376 | } |
377 | |
378 | void CGRecordLowering::accumulateFields() { |
379 | for (RecordDecl::field_iterator Field = D->field_begin(), |
380 | FieldEnd = D->field_end(); |
381 | Field != FieldEnd;) { |
382 | if (Field->isBitField()) { |
383 | RecordDecl::field_iterator Start = Field; |
384 | // Iterate to gather the list of bitfields. |
385 | for (++Field; Field != FieldEnd && Field->isBitField(); ++Field); |
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      // Use base subobject layout for the potentially-overlapping field,
      // as it is done in RecordLayoutBuilder.
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          Field->isPotentiallyOverlapping()
              ? getStorageType(Field->getType()->getAsCXXRecordDecl())
              : getStorageType(*Field),
          *Field));
396 | ++Field; |
397 | } else { |
398 | ++Field; |
399 | } |
400 | } |
401 | } |
402 | |
403 | void |
404 | CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field, |
405 | RecordDecl::field_iterator FieldEnd) { |
406 | // Run stores the first element of the current run of bitfields. FieldEnd is |
407 | // used as a special value to note that we don't have a current run. A |
408 | // bitfield run is a contiguous collection of bitfields that can be stored in |
409 | // the same storage block. Zero-sized bitfields and bitfields that would |
410 | // cross an alignment boundary break a run and start a new one. |
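  //
  // For example, in 'struct { char a : 4; char b : 4; int : 0; char c : 4; }'
  // the zero-length bitfield ends the first run, so a and b share one storage
  // unit and c starts a new one.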
411 | RecordDecl::field_iterator Run = FieldEnd; |
  // Tail is the offset of the first bit off the end of the current run. It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous. StartBitOffset is the offset of the beginning of the Run.
415 | uint64_t StartBitOffset, Tail = 0; |
416 | if (isDiscreteBitFieldABI()) { |
417 | for (; Field != FieldEnd; ++Field) { |
      uint64_t BitOffset = getFieldBitOffset(*Field);
419 | // Zero-width bitfields end runs. |
      if (Field->isZeroLengthBitField(Context)) {
421 | Run = FieldEnd; |
422 | continue; |
423 | } |
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
426 | // If we don't have a run yet, or don't live within the previous run's |
427 | // allocated storage then we allocate some storage and start a new run. |
428 | if (Run == FieldEnd || BitOffset >= Tail) { |
429 | Run = Field; |
430 | StartBitOffset = BitOffset; |
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
432 | // Add the storage member to the record. This must be added to the |
433 | // record before the bitfield members so that it gets laid out before |
434 | // the bitfields it contains get laid out. |
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
436 | } |
437 | // Bitfields get the offset of their storage but come afterward and remain |
438 | // there after a stable sort. |
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
440 | MemberInfo::Field, nullptr, *Field)); |
441 | } |
442 | return; |
443 | } |
444 | |
445 | // Check if OffsetInRecord (the size in bits of the current run) is better |
446 | // as a single field run. When OffsetInRecord has legal integer width, and |
447 | // its bitfield offset is naturally aligned, it is better to make the |
  // bitfield a separate storage component so that it can be accessed directly
449 | // with lower cost. |
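  //
  // For example, with -ffine-grained-bitfield-accesses,
  // 'struct { unsigned a : 16; unsigned b : 8; }' is lowered as separate i16
  // and i8 storage units rather than a single i24 run, so each field can be
  // loaded and stored directly.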
450 | auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, |
451 | uint64_t StartBitOffset) { |
452 | if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses) |
453 | return false; |
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
456 | return false; |
457 | // Make sure StartBitOffset is naturally aligned if it is treated as an |
458 | // IType integer. |
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
462 | return false; |
463 | return true; |
464 | }; |
465 | |
466 | // The start field is better as a single field run. |
467 | bool StartFieldAsSingleRun = false; |
468 | for (;;) { |
469 | // Check to see if we need to start a new run. |
470 | if (Run == FieldEnd) { |
471 | // If we're out of fields, return. |
472 | if (Field == FieldEnd) |
473 | break; |
474 | // Any non-zero-length bitfield can start a new run. |
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
479 | StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset, |
480 | StartBitOffset); |
481 | } |
482 | ++Field; |
483 | continue; |
484 | } |
485 | |
    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // then skip the block below and go ahead and emit the storage.
    // Otherwise, try to add bitfields to the run.
495 | if (!StartFieldAsSingleRun && Field != FieldEnd && |
496 | !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) && |
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
502 | ++Field; |
503 | continue; |
504 | } |
505 | |
506 | // We've hit a break-point in the run and need to emit a storage field. |
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run. Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
514 | MemberInfo::Field, nullptr, *Run)); |
515 | Run = FieldEnd; |
516 | StartFieldAsSingleRun = false; |
517 | } |
518 | } |
519 | |
520 | void CGRecordLowering::accumulateBases() { |
521 | // If we've got a primary virtual base, we need to add it with the bases. |
522 | if (Layout.isPrimaryBaseVirtual()) { |
523 | const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase(); |
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
526 | } |
527 | // Accumulate the non-virtual bases. |
528 | for (const auto &Base : RD->bases()) { |
529 | if (Base.isVirtual()) |
530 | continue; |
531 | |
532 | // Bases can be zero-sized even if not technically empty if they |
533 | // contain only a trailing array member. |
534 | const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); |
535 | if (!BaseDecl->isEmpty() && |
536 | !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero()) |
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
                                   MemberInfo::Base, getStorageType(BaseDecl),
                                   BaseDecl));
539 | } |
540 | } |
541 | |
/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
544 | /// When a volatile bit-field is read, and its container does not overlap with |
545 | /// any non-bit-field member or any zero length bit-field member, its container |
546 | /// must be read exactly once using the access width appropriate to the type of |
547 | /// the container. When a volatile bit-field is written, and its container does |
548 | /// not overlap with any non-bit-field member or any zero-length bit-field |
549 | /// member, its container must be read exactly once and written exactly once |
550 | /// using the access width appropriate to the type of the container. The two |
551 | /// accesses are not atomic. |
552 | /// |
553 | /// Enforcing the width restriction can be disabled using |
554 | /// -fno-aapcs-bitfield-width. |
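///
/// For example, in 'struct S { volatile short f : 8; };' the run is lowered
/// to i8 storage, but under AAPCS a volatile access to f must use a 16-bit
/// container (the width of short), so the volatile access parameters are
/// widened below. In contrast, in 'struct T { volatile int f : 8; char c; };'
/// the 32-bit container would overlap the non-bit-field member c, so we bail
/// and keep the default narrower access.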
555 | void CGRecordLowering::computeVolatileBitfields() { |
556 | if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth) |
557 | return; |
558 | |
559 | for (auto &I : BitFields) { |
560 | const FieldDecl *Field = I.first; |
561 | CGBitFieldInfo &Info = I.second; |
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce
    // an aligned load; bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
566 | ResLTy->getPrimitiveSizeInBits()) |
567 | continue; |
568 | // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets |
569 | // for big-endian targets, but it assumes a container of width |
570 | // Info.StorageSize. Since AAPCS uses a different container size (width |
571 | // of the type), we first undo that calculation here and redo it once |
572 | // the bit-field offset within the new container is calculated. |
573 | const unsigned OldOffset = |
574 | isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset; |
575 | // Offset to the bit-field from the beginning of the struct. |
576 | const unsigned AbsoluteOffset = |
        Context.toBits(Info.StorageOffset) + OldOffset;
578 | |
579 | // Container size is the width of the bit-field type. |
580 | const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits(); |
581 | // Nothing to do if the access uses the desired |
582 | // container width and is naturally aligned. |
583 | if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0)) |
584 | continue; |
585 | |
586 | // Offset within the container. |
587 | unsigned Offset = AbsoluteOffset & (StorageSize - 1); |
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases; we let
    // clang follow its own rules.
592 | if (Offset + Info.Size > StorageSize) |
593 | continue; |
594 | |
595 | // Re-adjust offsets for big-endian targets. |
596 | if (isBE()) |
597 | Offset = StorageSize - (Offset + Info.Size); |
598 | |
    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access extends beyond the record's memory, bail out.
608 | const CharUnits RecordSize = Layout.getSize(); |
609 | if (End >= RecordSize) |
610 | continue; |
611 | |
    // Bail out if performing this load would access non-bit-field members.
613 | bool Conflict = false; |
614 | for (const auto *F : D->fields()) { |
      // Allow overlaps with sized bit-fields.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
617 | continue; |
618 | |
      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));
621 | |
      // As C11 defines, a zero-sized bit-field defines a barrier, so
      // fields after and before it should be race-condition free.
      // The AAPCS acknowledges this and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
627 | if (End > FOffset && StorageOffset < FOffset) { |
628 | Conflict = true; |
629 | break; |
630 | } |
631 | } |
632 | |
633 | const CharUnits FEnd = |
634 | FOffset + |
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
637 | CharUnits::One(); |
638 | // If no overlap, continue. |
639 | if (End < FOffset || FEnd < StorageOffset) |
640 | continue; |
641 | |
642 | // The desired load overlaps a non-bit-field member, bail out. |
643 | Conflict = true; |
644 | break; |
645 | } |
646 | |
647 | if (Conflict) |
648 | continue; |
649 | // Write the new bit-field access parameters. |
650 | // As the storage offset now is defined as the number of elements from the |
651 | // start of the structure, we should divide the Offset by the element size. |
652 | Info.VolatileStorageOffset = |
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
654 | Info.VolatileStorageSize = StorageSize; |
655 | Info.VolatileOffset = Offset; |
656 | } |
657 | } |
658 | |
659 | void CGRecordLowering::accumulateVPtrs() { |
660 | if (Layout.hasOwnVFPtr()) |
661 | Members.push_back( |
662 | x: MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr, |
663 | llvm::PointerType::getUnqual(C&: Types.getLLVMContext()))); |
664 | if (Layout.hasOwnVBPtr()) |
665 | Members.push_back( |
666 | x: MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr, |
667 | llvm::PointerType::getUnqual(C&: Types.getLLVMContext()))); |
668 | } |
669 | |
670 | void CGRecordLowering::accumulateVBases() { |
671 | CharUnits ScissorOffset = Layout.getNonVirtualSize(); |
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
673 | // smaller than the nvsize. Here we check to see if such a base is placed |
674 | // before the nvsize and set the scissor offset to that, instead of the |
675 | // nvsize. |
676 | if (isOverlappingVBaseABI()) |
677 | for (const auto &Base : RD->vbases()) { |
678 | const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); |
679 | if (BaseDecl->isEmpty()) |
680 | continue; |
681 | // If the vbase is a primary virtual base of some base, then it doesn't |
682 | // get its own storage location but instead lives inside of that base. |
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
684 | continue; |
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
687 | } |
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
690 | for (const auto &Base : RD->vbases()) { |
691 | const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); |
692 | if (BaseDecl->isEmpty()) |
693 | continue; |
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
695 | // If the vbase is a primary virtual base of some base, then it doesn't |
696 | // get its own storage location but instead lives inside of that base. |
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
710 | } |
711 | } |
712 | |
713 | bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl, |
714 | const CXXRecordDecl *Query) { |
715 | const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl); |
716 | if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query) |
717 | return false; |
718 | for (const auto &Base : Decl->bases()) |
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
720 | return false; |
721 | return true; |
722 | } |
723 | |
724 | void CGRecordLowering::calculateZeroInit() { |
725 | for (std::vector<MemberInfo>::const_iterator Member = Members.begin(), |
726 | MemberEnd = Members.end(); |
727 | IsZeroInitializableAsBase && Member != MemberEnd; ++Member) { |
728 | if (Member->Kind == MemberInfo::Field) { |
      if (!Member->FD || isZeroInitializable(Member->FD))
730 | continue; |
731 | IsZeroInitializable = IsZeroInitializableAsBase = false; |
732 | } else if (Member->Kind == MemberInfo::Base || |
733 | Member->Kind == MemberInfo::VBase) { |
734 | if (isZeroInitializable(Member->RD)) |
735 | continue; |
736 | IsZeroInitializable = false; |
737 | if (Member->Kind == MemberInfo::Base) |
738 | IsZeroInitializableAsBase = false; |
739 | } |
740 | } |
741 | } |
742 | |
743 | void CGRecordLowering::clipTailPadding() { |
744 | std::vector<MemberInfo>::iterator Prior = Members.begin(); |
  CharUnits Tail = getSize(Prior->Data);
746 | for (std::vector<MemberInfo>::iterator Member = Prior + 1, |
747 | MemberEnd = Members.end(); |
748 | Member != MemberEnd; ++Member) { |
749 | // Only members with data and the scissor can cut into tail padding. |
750 | if (!Member->Data && Member->Kind != MemberInfo::Scissor) |
751 | continue; |
752 | if (Member->Offset < Tail) { |
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
768 | } |
769 | } |
770 | |
771 | void CGRecordLowering::determinePacked(bool NVBaseType) { |
772 | if (Packed) |
773 | return; |
774 | CharUnits Alignment = CharUnits::One(); |
775 | CharUnits NVAlignment = CharUnits::One(); |
776 | CharUnits NVSize = |
777 | !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero(); |
778 | for (std::vector<MemberInfo>::const_iterator Member = Members.begin(), |
779 | MemberEnd = Members.end(); |
780 | Member != MemberEnd; ++Member) { |
781 | if (!Member->Data) |
782 | continue; |
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
786 | Packed = true; |
787 | if (Member->Offset < NVSize) |
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
790 | } |
791 | // If the size of the record (the capstone's offset) is not a multiple of the |
792 | // record's alignment, it must be packed. |
793 | if (Members.back().Offset % Alignment) |
794 | Packed = true; |
  // If the size of the non-virtual sub-object is not a multiple of the
  // non-virtual sub-object's alignment, it must be packed. We cannot have a
  // packed non-virtual sub-object and an unpacked complete object or vice
  // versa.
798 | if (NVSize % NVAlignment) |
799 | Packed = true; |
800 | // Update the alignment of the sentinel. |
801 | if (!Packed) |
    Members.back().Data = getIntNType(Context.toBits(Alignment));
803 | } |
804 | |
805 | void CGRecordLowering::insertPadding() { |
806 | std::vector<std::pair<CharUnits, CharUnits> > Padding; |
807 | CharUnits Size = CharUnits::Zero(); |
808 | for (std::vector<MemberInfo>::const_iterator Member = Members.begin(), |
809 | MemberEnd = Members.end(); |
810 | Member != MemberEnd; ++Member) { |
811 | if (!Member->Data) |
812 | continue; |
813 | CharUnits Offset = Member->Offset; |
814 | assert(Offset >= Size); |
815 | // Insert padding if we need to. |
816 | if (Offset != |
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
820 | } |
821 | if (Padding.empty()) |
822 | return; |
823 | // Add the padding to the Members list and sort it. |
824 | for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator |
825 | Pad = Padding.begin(), PadEnd = Padding.end(); |
826 | Pad != PadEnd; ++Pad) |
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
829 | } |
830 | |
831 | void CGRecordLowering::fillOutputFields() { |
832 | for (std::vector<MemberInfo>::const_iterator Member = Members.begin(), |
833 | MemberEnd = Members.end(); |
834 | Member != MemberEnd; ++Member) { |
835 | if (Member->Data) |
      FieldTypes.push_back(Member->Data);
837 | if (Member->Kind == MemberInfo::Field) { |
838 | if (Member->FD) |
839 | Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1; |
840 | // A field without storage must be a bitfield. |
841 | if (!Member->Data) |
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
843 | } else if (Member->Kind == MemberInfo::Base) |
844 | NonVirtualBases[Member->RD] = FieldTypes.size() - 1; |
845 | else if (Member->Kind == MemberInfo::VBase) |
846 | VirtualBases[Member->RD] = FieldTypes.size() - 1; |
847 | } |
848 | } |
849 | |
850 | CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types, |
851 | const FieldDecl *FD, |
852 | uint64_t Offset, uint64_t Size, |
853 | uint64_t StorageSize, |
854 | CharUnits StorageOffset) { |
855 | // This function is vestigial from CGRecordLayoutBuilder days but is still |
856 | // used in GCObjCRuntime.cpp. That usage has a "fixme" attached to it that |
857 | // when addressed will allow for the removal of this function. |
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
      CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
862 | |
863 | bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType(); |
864 | |
865 | if (Size > TypeSizeInBits) { |
866 | // We have a wide bit-field. The extra bits are only used for padding, so |
867 | // if we have a bitfield of type T, with size N: |
868 | // |
869 | // T t : N; |
870 | // |
871 | // We can just assume that it's: |
872 | // |
873 | // T t : sizeof(T); |
874 | // |
875 | Size = TypeSizeInBits; |
876 | } |
877 | |
878 | // Reverse the bit offsets for big endian machines. Because we represent |
879 | // a bitfield as a single large integer load, we can imagine the bits |
880 | // counting from the most-significant-bit instead of the |
881 | // least-significant-bit. |
882 | if (Types.getDataLayout().isBigEndian()) { |
883 | Offset = StorageSize - (Offset + Size); |
884 | } |
885 | |
886 | return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset); |
887 | } |
888 | |
889 | std::unique_ptr<CGRecordLayout> |
890 | CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) { |
891 | CGRecordLowering Builder(*this, D, /*Packed=*/false); |
892 | |
  Builder.lower(/*NonVirtualBaseType=*/false);
894 | |
895 | // If we're in C++, compute the base subobject type. |
896 | llvm::StructType *BaseTy = nullptr; |
  if (isa<CXXRecordDecl>(D)) {
898 | BaseTy = Ty; |
899 | if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) { |
900 | CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed); |
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
905 | // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work |
906 | // on both of them with the same index. |
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
909 | } |
910 | } |
911 | |
912 | // Fill in the struct *after* computing the base type. Filling in the body |
913 | // signifies that the type is no longer opaque and record layout is complete, |
914 | // but we may need to recursively layout D while laying D out as a base type. |
  Ty->setBody(Builder.FieldTypes, Builder.Packed);
916 | |
  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);
920 | |
  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);
929 | |
930 | // Dump the layout, if requested. |
931 | if (getContext().getLangOpts().DumpRecordLayouts) { |
932 | llvm::outs() << "\n*** Dumping IRgen Record Layout\n" ; |
933 | llvm::outs() << "Record: " ; |
934 | D->dump(llvm::outs()); |
935 | llvm::outs() << "\nLayout: " ; |
936 | RL->print(OS&: llvm::outs()); |
937 | } |
938 | |
939 | #ifndef NDEBUG |
940 | // Verify that the computed LLVM struct size matches the AST layout size. |
941 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D); |
942 | |
  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");
946 | |
947 | if (BaseTy) { |
948 | CharUnits NonVirtualSize = Layout.getNonVirtualSize(); |
949 | |
950 | uint64_t AlignedNonVirtualTypeSizeInBits = |
        getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
               getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
956 | } |
957 | |
958 | // Verify that the LLVM and AST field offsets agree. |
959 | llvm::StructType *ST = RL->getLLVMType(); |
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
961 | |
962 | const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D); |
963 | RecordDecl::field_iterator it = D->field_begin(); |
964 | for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) { |
965 | const FieldDecl *FD = *it; |
966 | |
967 | // Ignore zero-sized fields. |
    if (FD->isZeroSize(getContext()))
969 | continue; |
970 | |
971 | // For non-bit-fields, just check that the LLVM struct offset matches the |
972 | // AST offset. |
973 | if (!FD->isBitField()) { |
974 | unsigned FieldNo = RL->getLLVMFieldNo(FD); |
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
977 | continue; |
978 | } |
979 | |
980 | // Ignore unnamed bit-fields. |
981 | if (!FD->getDeclName()) |
982 | continue; |
983 | |
984 | const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD); |
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
986 | |
987 | // Unions have overlapping elements dictating their layout, but for |
988 | // non-unions we can verify that this section of the layout is the exact |
989 | // expected size. |
990 | if (D->isUnion()) { |
991 | // For unions we verify that the start is zero and the size |
992 | // is in-bounds. However, on BE systems, the offset may be non-zero, but |
993 | // the size + offset should match the storage size in that case as it |
994 | // "starts" at the back. |
995 | if (getDataLayout().isBigEndian()) |
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
                   Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
1004 | } else { |
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <=
               Info.StorageSize &&
           "Bitfield outside of its allocated storage");
1014 | } |
1015 | #endif |
1016 | |
1017 | return RL; |
1018 | } |
1019 | |
1020 | void CGRecordLayout::print(raw_ostream &OS) const { |
1021 | OS << "<CGRecordLayout\n" ; |
1022 | OS << " LLVMType:" << *CompleteObjectType << "\n" ; |
1023 | if (BaseSubobjectType) |
1024 | OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n" ; |
1025 | OS << " IsZeroInitializable:" << IsZeroInitializable << "\n" ; |
1026 | OS << " BitFields:[\n" ; |
1027 | |
1028 | // Print bit-field infos in declaration order. |
1029 | std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs; |
1030 | for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator |
1031 | it = BitFields.begin(), ie = BitFields.end(); |
1032 | it != ie; ++it) { |
1033 | const RecordDecl *RD = it->first->getParent(); |
1034 | unsigned Index = 0; |
1035 | for (RecordDecl::field_iterator |
1036 | it2 = RD->field_begin(); *it2 != it->first; ++it2) |
1037 | ++Index; |
    BFIs.push_back(std::make_pair(Index, &it->second));
1039 | } |
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
1041 | for (unsigned i = 0, e = BFIs.size(); i != e; ++i) { |
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
1045 | } |
1046 | |
1047 | OS << "]>\n" ; |
1048 | } |
1049 | |
1050 | LLVM_DUMP_METHOD void CGRecordLayout::dump() const { |
  print(llvm::errs());
1052 | } |
1053 | |
1054 | void CGBitFieldInfo::print(raw_ostream &OS) const { |
1055 | OS << "<CGBitFieldInfo" |
1056 | << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned |
1057 | << " StorageSize:" << StorageSize |
1058 | << " StorageOffset:" << StorageOffset.getQuantity() |
1059 | << " VolatileOffset:" << VolatileOffset |
1060 | << " VolatileStorageSize:" << VolatileStorageSize |
1061 | << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">" ; |
1062 | } |
1063 | |
1064 | LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const { |
  print(llvm::errs());
1066 | } |
1067 | |