1//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These classes wrap the information about a call or function
10// definition used to handle ABI compliancy.
11//
12//===----------------------------------------------------------------------===//
13
14#include "TargetInfo.h"
15#include "ABIInfo.h"
16#include "CGBlocks.h"
17#include "CGCXXABI.h"
18#include "CGValue.h"
19#include "CodeGenFunction.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/RecordLayout.h"
22#include "clang/Basic/CodeGenOptions.h"
23#include "clang/Basic/DiagnosticFrontend.h"
24#include "clang/Basic/Builtins.h"
25#include "clang/CodeGen/CGFunctionInfo.h"
26#include "clang/CodeGen/SwiftCallingConv.h"
27#include "llvm/ADT/SmallBitVector.h"
28#include "llvm/ADT/StringExtras.h"
29#include "llvm/ADT/StringSwitch.h"
30#include "llvm/ADT/Triple.h"
31#include "llvm/ADT/Twine.h"
32#include "llvm/IR/DataLayout.h"
33#include "llvm/IR/IntrinsicsNVPTX.h"
34#include "llvm/IR/IntrinsicsS390.h"
35#include "llvm/IR/Type.h"
36#include "llvm/Support/raw_ostream.h"
37#include <algorithm> // std::sort
38
39using namespace clang;
40using namespace CodeGen;
41
42// Helper for coercing an aggregate argument or return value into an integer
43// array of the same size (including padding) and alignment. This alternate
44// coercion happens only for the RenderScript ABI and can be removed after
45// runtimes that rely on it are no longer supported.
46//
47// RenderScript assumes that the size of the argument / return value in the IR
48// is the same as the size of the corresponding qualified type. This helper
49// coerces the aggregate type into an array of the same size (including
50// padding). This coercion is used in lieu of expansion of struct members or
51// other canonical coercions that return a coerced-type of larger size.
52//
53// Ty - The argument / return value type
54// Context - The associated ASTContext
55// LLVMContext - The associated LLVMContext
56static ABIArgInfo coerceToIntArray(QualType Ty,
57 ASTContext &Context,
58 llvm::LLVMContext &LLVMContext) {
59 // Alignment and Size are measured in bits.
60 const uint64_t Size = Context.getTypeSize(Ty);
61 const uint64_t Alignment = Context.getTypeAlign(Ty);
62 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
63 const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
64 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
65}
66
67static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
68 llvm::Value *Array,
69 llvm::Value *Value,
70 unsigned FirstIndex,
71 unsigned LastIndex) {
72 // Alternatively, we could emit this as a loop in the source.
73 for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
74 llvm::Value *Cell =
75 Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
76 Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
77 }
78}
79
80static bool isAggregateTypeForABI(QualType T) {
81 return !CodeGenFunction::hasScalarEvaluationKind(T) ||
82 T->isMemberFunctionPointerType();
83}
84
85ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
86 bool Realign,
87 llvm::Type *Padding) const {
88 return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
89 Realign, Padding);
90}
91
92ABIArgInfo
93ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
94 return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
95 /*ByVal*/ false, Realign);
96}
97
98Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
99 QualType Ty) const {
100 return Address::invalid();
101}
102
103bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
104 if (Ty->isPromotableIntegerType())
105 return true;
106
107 if (const auto *EIT = Ty->getAs<ExtIntType>())
108 if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
109 return true;
110
111 return false;
112}
113
114ABIInfo::~ABIInfo() {}
115
116/// Does the given lowering require more than the given number of
117/// registers when expanded?
118///
119/// This is intended to be the basis of a reasonable basic implementation
120/// of should{Pass,Return}IndirectlyForSwift.
121///
122/// For most targets, a limit of four total registers is reasonable; this
123/// limits the amount of code required in order to move around the value
124/// in case it wasn't produced immediately prior to the call by the caller
125/// (or wasn't produced in exactly the right registers) or isn't used
126/// immediately within the callee. But some targets may need to further
127/// limit the register count due to an inability to support that many
128/// return registers.
129static bool occupiesMoreThan(CodeGenTypes &cgt,
130 ArrayRef<llvm::Type*> scalarTypes,
131 unsigned maxAllRegisters) {
132 unsigned intCount = 0, fpCount = 0;
133 for (llvm::Type *type : scalarTypes) {
134 if (type->isPointerTy()) {
135 intCount++;
136 } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
137 auto ptrWidth = cgt.getTarget().getPointerWidth(0);
138 intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
139 } else {
140 assert(type->isVectorTy() || type->isFloatingPointTy());
141 fpCount++;
142 }
143 }
144
145 return (intCount + fpCount > maxAllRegisters);
146}
147
148bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
149 llvm::Type *eltTy,
150 unsigned numElts) const {
151 // The default implementation of this assumes that the target guarantees
152 // 128-bit SIMD support but nothing more.
153 return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
154}
155
156static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
157 CGCXXABI &CXXABI) {
158 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
159 if (!RD) {
160 if (!RT->getDecl()->canPassInRegisters())
161 return CGCXXABI::RAA_Indirect;
162 return CGCXXABI::RAA_Default;
163 }
164 return CXXABI.getRecordArgABI(RD);
165}
166
167static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
168 CGCXXABI &CXXABI) {
169 const RecordType *RT = T->getAs<RecordType>();
170 if (!RT)
171 return CGCXXABI::RAA_Default;
172 return getRecordArgABI(RT, CXXABI);
173}
174
175static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
176 const ABIInfo &Info) {
177 QualType Ty = FI.getReturnType();
178
179 if (const auto *RT = Ty->getAs<RecordType>())
180 if (!isa<CXXRecordDecl>(RT->getDecl()) &&
181 !RT->getDecl()->canPassInRegisters()) {
182 FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
183 return true;
184 }
185
186 return CXXABI.classifyReturnType(FI);
187}
188
189/// Pass transparent unions as if they were the type of the first element. Sema
190/// should ensure that all elements of the union have the same "machine type".
191static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
192 if (const RecordType *UT = Ty->getAsUnionType()) {
193 const RecordDecl *UD = UT->getDecl();
194 if (UD->hasAttr<TransparentUnionAttr>()) {
195 assert(!UD->field_empty() && "sema created an empty transparent union");
196 return UD->field_begin()->getType();
197 }
198 }
199 return Ty;
200}
201
202CGCXXABI &ABIInfo::getCXXABI() const {
203 return CGT.getCXXABI();
204}
205
206ASTContext &ABIInfo::getContext() const {
207 return CGT.getContext();
208}
209
210llvm::LLVMContext &ABIInfo::getVMContext() const {
211 return CGT.getLLVMContext();
212}
213
214const llvm::DataLayout &ABIInfo::getDataLayout() const {
215 return CGT.getDataLayout();
216}
217
218const TargetInfo &ABIInfo::getTarget() const {
219 return CGT.getTarget();
220}
221
222const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
223 return CGT.getCodeGenOpts();
224}
225
226bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
227
228bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
229 return false;
230}
231
232bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
233 uint64_t Members) const {
234 return false;
235}
236
237LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
238 raw_ostream &OS = llvm::errs();
239 OS << "(ABIArgInfo Kind=";
240 switch (TheKind) {
241 case Direct:
242 OS << "Direct Type=";
243 if (llvm::Type *Ty = getCoerceToType())
244 Ty->print(OS);
245 else
246 OS << "null";
247 break;
248 case Extend:
249 OS << "Extend";
250 break;
251 case Ignore:
252 OS << "Ignore";
253 break;
254 case InAlloca:
255 OS << "InAlloca Offset=" << getInAllocaFieldIndex();
256 break;
257 case Indirect:
258 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
259 << " ByVal=" << getIndirectByVal()
260 << " Realign=" << getIndirectRealign();
261 break;
262 case IndirectAliased:
263 OS << "Indirect Align=" << getIndirectAlign().getQuantity()
264 << " AadrSpace=" << getIndirectAddrSpace()
265 << " Realign=" << getIndirectRealign();
266 break;
267 case Expand:
268 OS << "Expand";
269 break;
270 case CoerceAndExpand:
271 OS << "CoerceAndExpand Type=";
272 getCoerceAndExpandType()->print(OS);
273 break;
274 }
275 OS << ")\n";
276}
277
278// Dynamically round a pointer up to a multiple of the given alignment.
279static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
280 llvm::Value *Ptr,
281 CharUnits Align) {
282 llvm::Value *PtrAsInt = Ptr;
283 // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
284 PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
285 PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
286 llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
287 PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
288 llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
289 PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
290 Ptr->getType(),
291 Ptr->getName() + ".aligned");
292 return PtrAsInt;
293}
294
295/// Emit va_arg for a platform using the common void* representation,
296/// where arguments are simply emitted in an array of slots on the stack.
297///
298/// This version implements the core direct-value passing rules.
299///
300/// \param SlotSize - The size and alignment of a stack slot.
301/// Each argument will be allocated to a multiple of this number of
302/// slots, and all the slots will be aligned to this value.
303/// \param AllowHigherAlign - The slot alignment is not a cap;
304/// an argument type with an alignment greater than the slot size
305/// will be emitted on a higher-alignment address, potentially
306/// leaving one or more empty slots behind as padding. If this
307/// is false, the returned address might be less-aligned than
308/// DirectAlign.
309static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
310 Address VAListAddr,
311 llvm::Type *DirectTy,
312 CharUnits DirectSize,
313 CharUnits DirectAlign,
314 CharUnits SlotSize,
315 bool AllowHigherAlign) {
316 // Cast the element type to i8* if necessary. Some platforms define
317 // va_list as a struct containing an i8* instead of just an i8*.
318 if (VAListAddr.getElementType() != CGF.Int8PtrTy)
319 VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
320
321 llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
322
323 // If the CC aligns values higher than the slot size, do so if needed.
324 Address Addr = Address::invalid();
325 if (AllowHigherAlign && DirectAlign > SlotSize) {
326 Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
327 DirectAlign);
328 } else {
329 Addr = Address(Ptr, SlotSize);
330 }
331
332 // Advance the pointer past the argument, then store that back.
333 CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
334 Address NextPtr =
335 CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
336 CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
337
338 // If the argument is smaller than a slot, and this is a big-endian
339 // target, the argument will be right-adjusted in its slot.
340 if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
341 !DirectTy->isStructTy()) {
342 Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
343 }
344
345 Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
346 return Addr;
347}
348
349/// Emit va_arg for a platform using the common void* representation,
350/// where arguments are simply emitted in an array of slots on the stack.
351///
352/// \param IsIndirect - Values of this type are passed indirectly.
353/// \param ValueInfo - The size and alignment of this type, generally
354/// computed with getContext().getTypeInfoInChars(ValueTy).
355/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
356/// Each argument will be allocated to a multiple of this number of
357/// slots, and all the slots will be aligned to this value.
358/// \param AllowHigherAlign - The slot alignment is not a cap;
359/// an argument type with an alignment greater than the slot size
360/// will be emitted on a higher-alignment address, potentially
361/// leaving one or more empty slots behind as padding.
362static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
363 QualType ValueTy, bool IsIndirect,
364 TypeInfoChars ValueInfo,
365 CharUnits SlotSizeAndAlign,
366 bool AllowHigherAlign) {
367 // The size and alignment of the value that was passed directly.
368 CharUnits DirectSize, DirectAlign;
369 if (IsIndirect) {
370 DirectSize = CGF.getPointerSize();
371 DirectAlign = CGF.getPointerAlign();
372 } else {
373 DirectSize = ValueInfo.Width;
374 DirectAlign = ValueInfo.Align;
375 }
376
377 // Cast the address we've calculated to the right type.
378 llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
379 if (IsIndirect)
380 DirectTy = DirectTy->getPointerTo(0);
381
382 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
383 DirectSize, DirectAlign,
384 SlotSizeAndAlign,
385 AllowHigherAlign);
386
387 if (IsIndirect) {
388 Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
389 }
390
391 return Addr;
392
393}
394
395static Address emitMergePHI(CodeGenFunction &CGF,
396 Address Addr1, llvm::BasicBlock *Block1,
397 Address Addr2, llvm::BasicBlock *Block2,
398 const llvm::Twine &Name = "") {
399 assert(Addr1.getType() == Addr2.getType());
400 llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
401 PHI->addIncoming(Addr1.getPointer(), Block1);
402 PHI->addIncoming(Addr2.getPointer(), Block2);
403 CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
404 return Address(PHI, Align);
405}
406
407TargetCodeGenInfo::~TargetCodeGenInfo() = default;
408
409// If someone can figure out a general rule for this, that would be great.
410// It's probably just doomed to be platform-dependent, though.
411unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
412 // Verified for:
413 // x86-64 FreeBSD, Linux, Darwin
414 // x86-32 FreeBSD, Linux, Darwin
415 // PowerPC Linux, Darwin
416 // ARM Darwin (*not* EABI)
417 // AArch64 Linux
418 return 32;
419}
420
421bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
422 const FunctionNoProtoType *fnType) const {
423 // The following conventions are known to require this to be false:
424 // x86_stdcall
425 // MIPS
426 // For everything else, we just prefer false unless we opt out.
427 return false;
428}
429
430void
431TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
432 llvm::SmallString<24> &Opt) const {
433 // This assumes the user is passing a library name like "rt" instead of a
434 // filename like "librt.a/so", and that they don't care whether it's static or
435 // dynamic.
436 Opt = "-l";
437 Opt += Lib;
438}
439
440unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
441 // OpenCL kernels are called via an explicit runtime API with arguments
442 // set with clSetKernelArg(), not as normal sub-functions.
443 // Return SPIR_KERNEL by default as the kernel calling convention to
444 // ensure the fingerprint is fixed such way that each OpenCL argument
445 // gets one matching argument in the produced kernel function argument
446 // list to enable feasible implementation of clSetKernelArg() with
447 // aggregates etc. In case we would use the default C calling conv here,
448 // clSetKernelArg() might break depending on the target-specific
449 // conventions; different targets might split structs passed as values
450 // to multiple function arguments etc.
451 return llvm::CallingConv::SPIR_KERNEL;
452}
453
454llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
455 llvm::PointerType *T, QualType QT) const {
456 return llvm::ConstantPointerNull::get(T);
457}
458
459LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
460 const VarDecl *D) const {
461 assert(!CGM.getLangOpts().OpenCL &&
462 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
463 "Address space agnostic languages only");
464 return D ? D->getType().getAddressSpace() : LangAS::Default;
465}
466
467llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
468 CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
469 LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
470 // Since target may map different address spaces in AST to the same address
471 // space, an address space conversion may end up as a bitcast.
472 if (auto *C = dyn_cast<llvm::Constant>(Src))
473 return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
474 // Try to preserve the source's name to make IR more readable.
475 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
476 Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
477}
478
479llvm::Constant *
480TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
481 LangAS SrcAddr, LangAS DestAddr,
482 llvm::Type *DestTy) const {
483 // Since target may map different address spaces in AST to the same address
484 // space, an address space conversion may end up as a bitcast.
485 return llvm::ConstantExpr::getPointerCast(Src, DestTy);
486}
487
488llvm::SyncScope::ID
489TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
490 SyncScope Scope,
491 llvm::AtomicOrdering Ordering,
492 llvm::LLVMContext &Ctx) const {
493 return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
494}
495
496static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
497
498/// isEmptyField - Return true iff a the field is "empty", that is it
499/// is an unnamed bit-field or an (array of) empty record(s).
500static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
501 bool AllowArrays) {
502 if (FD->isUnnamedBitfield())
503 return true;
504
505 QualType FT = FD->getType();
506
507 // Constant arrays of empty records count as empty, strip them off.
508 // Constant arrays of zero length always count as empty.
509 bool WasArray = false;
510 if (AllowArrays)
511 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
512 if (AT->getSize() == 0)
513 return true;
514 FT = AT->getElementType();
515 // The [[no_unique_address]] special case below does not apply to
516 // arrays of C++ empty records, so we need to remember this fact.
517 WasArray = true;
518 }
519
520 const RecordType *RT = FT->getAs<RecordType>();
521 if (!RT)
522 return false;
523
524 // C++ record fields are never empty, at least in the Itanium ABI.
525 //
526 // FIXME: We should use a predicate for whether this behavior is true in the
527 // current ABI.
528 //
529 // The exception to the above rule are fields marked with the
530 // [[no_unique_address]] attribute (since C++20). Those do count as empty
531 // according to the Itanium ABI. The exception applies only to records,
532 // not arrays of records, so we must also check whether we stripped off an
533 // array type above.
534 if (isa<CXXRecordDecl>(RT->getDecl()) &&
535 (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
536 return false;
537
538 return isEmptyRecord(Context, FT, AllowArrays);
539}
540
541/// isEmptyRecord - Return true iff a structure contains only empty
542/// fields. Note that a structure with a flexible array member is not
543/// considered empty.
544static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
545 const RecordType *RT = T->getAs<RecordType>();
546 if (!RT)
547 return false;
548 const RecordDecl *RD = RT->getDecl();
549 if (RD->hasFlexibleArrayMember())
550 return false;
551
552 // If this is a C++ record, check the bases first.
553 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
554 for (const auto &I : CXXRD->bases())
555 if (!isEmptyRecord(Context, I.getType(), true))
556 return false;
557
558 for (const auto *I : RD->fields())
559 if (!isEmptyField(Context, I, AllowArrays))
560 return false;
561 return true;
562}
563
564/// isSingleElementStruct - Determine if a structure is a "single
565/// element struct", i.e. it has exactly one non-empty field or
566/// exactly one field which is itself a single element
567/// struct. Structures with flexible array members are never
568/// considered single element structs.
569///
570/// \return The field declaration for the single non-empty field, if
571/// it exists.
572static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
573 const RecordType *RT = T->getAs<RecordType>();
574 if (!RT)
575 return nullptr;
576
577 const RecordDecl *RD = RT->getDecl();
578 if (RD->hasFlexibleArrayMember())
579 return nullptr;
580
581 const Type *Found = nullptr;
582
583 // If this is a C++ record, check the bases first.
584 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
585 for (const auto &I : CXXRD->bases()) {
586 // Ignore empty records.
587 if (isEmptyRecord(Context, I.getType(), true))
588 continue;
589
590 // If we already found an element then this isn't a single-element struct.
591 if (Found)
592 return nullptr;
593
594 // If this is non-empty and not a single element struct, the composite
595 // cannot be a single element struct.
596 Found = isSingleElementStruct(I.getType(), Context);
597 if (!Found)
598 return nullptr;
599 }
600 }
601
602 // Check for single element.
603 for (const auto *FD : RD->fields()) {
604 QualType FT = FD->getType();
605
606 // Ignore empty fields.
607 if (isEmptyField(Context, FD, true))
608 continue;
609
610 // If we already found an element then this isn't a single-element
611 // struct.
612 if (Found)
613 return nullptr;
614
615 // Treat single element arrays as the element.
616 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
617 if (AT->getSize().getZExtValue() != 1)
618 break;
619 FT = AT->getElementType();
620 }
621
622 if (!isAggregateTypeForABI(FT)) {
623 Found = FT.getTypePtr();
624 } else {
625 Found = isSingleElementStruct(FT, Context);
626 if (!Found)
627 return nullptr;
628 }
629 }
630
631 // We don't consider a struct a single-element struct if it has
632 // padding beyond the element type.
633 if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
634 return nullptr;
635
636 return Found;
637}
638
639namespace {
640Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
641 const ABIArgInfo &AI) {
642 // This default implementation defers to the llvm backend's va_arg
643 // instruction. It can handle only passing arguments directly
644 // (typically only handled in the backend for primitive types), or
645 // aggregates passed indirectly by pointer (NOTE: if the "byval"
646 // flag has ABI impact in the callee, this implementation cannot
647 // work.)
648
649 // Only a few cases are covered here at the moment -- those needed
650 // by the default abi.
651 llvm::Value *Val;
652
653 if (AI.isIndirect()) {
654 assert(!AI.getPaddingType() &&
655 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
656 assert(
657 !AI.getIndirectRealign() &&
658 "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
659
660 auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
661 CharUnits TyAlignForABI = TyInfo.Align;
662
663 llvm::Type *BaseTy =
664 llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
665 llvm::Value *Addr =
666 CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
667 return Address(Addr, TyAlignForABI);
668 } else {
669 assert((AI.isDirect() || AI.isExtend()) &&
670 "Unexpected ArgInfo Kind in generic VAArg emitter!");
671
672 assert(!AI.getInReg() &&
673 "Unexpected InReg seen in arginfo in generic VAArg emitter!");
674 assert(!AI.getPaddingType() &&
675 "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
676 assert(!AI.getDirectOffset() &&
677 "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
678 assert(!AI.getCoerceToType() &&
679 "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
680
681 Address Temp = CGF.CreateMemTemp(Ty, "varet");
682 Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
683 CGF.Builder.CreateStore(Val, Temp);
684 return Temp;
685 }
686}
687
688/// DefaultABIInfo - The default implementation for ABI specific
689/// details. This implementation provides information which results in
690/// self-consistent and sensible LLVM IR generation, but does not
691/// conform to any particular ABI.
692class DefaultABIInfo : public ABIInfo {
693public:
694 DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
695
696 ABIArgInfo classifyReturnType(QualType RetTy) const;
697 ABIArgInfo classifyArgumentType(QualType RetTy) const;
698
699 void computeInfo(CGFunctionInfo &FI) const override {
700 if (!getCXXABI().classifyReturnType(FI))
701 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
702 for (auto &I : FI.arguments())
703 I.info = classifyArgumentType(I.type);
704 }
705
706 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
707 QualType Ty) const override {
708 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
709 }
710};
711
712class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
713public:
714 DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
715 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
716};
717
718ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
719 Ty = useFirstFieldIfTransparentUnion(Ty);
720
721 if (isAggregateTypeForABI(Ty)) {
722 // Records with non-trivial destructors/copy-constructors should not be
723 // passed by value.
724 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
725 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
726
727 return getNaturalAlignIndirect(Ty);
728 }
729
730 // Treat an enum type as its underlying type.
731 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
732 Ty = EnumTy->getDecl()->getIntegerType();
733
734 ASTContext &Context = getContext();
735 if (const auto *EIT = Ty->getAs<ExtIntType>())
736 if (EIT->getNumBits() >
737 Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
738 ? Context.Int128Ty
739 : Context.LongLongTy))
740 return getNaturalAlignIndirect(Ty);
741
742 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
743 : ABIArgInfo::getDirect());
744}
745
746ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
747 if (RetTy->isVoidType())
748 return ABIArgInfo::getIgnore();
749
750 if (isAggregateTypeForABI(RetTy))
751 return getNaturalAlignIndirect(RetTy);
752
753 // Treat an enum type as its underlying type.
754 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
755 RetTy = EnumTy->getDecl()->getIntegerType();
756
757 if (const auto *EIT = RetTy->getAs<ExtIntType>())
758 if (EIT->getNumBits() >
759 getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
760 ? getContext().Int128Ty
761 : getContext().LongLongTy))
762 return getNaturalAlignIndirect(RetTy);
763
764 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
765 : ABIArgInfo::getDirect());
766}
767
768//===----------------------------------------------------------------------===//
769// WebAssembly ABI Implementation
770//
771// This is a very simple ABI that relies a lot on DefaultABIInfo.
772//===----------------------------------------------------------------------===//
773
774class WebAssemblyABIInfo final : public SwiftABIInfo {
775public:
776 enum ABIKind {
777 MVP = 0,
778 ExperimentalMV = 1,
779 };
780
781private:
782 DefaultABIInfo defaultInfo;
783 ABIKind Kind;
784
785public:
786 explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
787 : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
788
789private:
790 ABIArgInfo classifyReturnType(QualType RetTy) const;
791 ABIArgInfo classifyArgumentType(QualType Ty) const;
792
793 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
794 // non-virtual, but computeInfo and EmitVAArg are virtual, so we
795 // overload them.
796 void computeInfo(CGFunctionInfo &FI) const override {
797 if (!getCXXABI().classifyReturnType(FI))
798 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
799 for (auto &Arg : FI.arguments())
800 Arg.info = classifyArgumentType(Arg.type);
801 }
802
803 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
804 QualType Ty) const override;
805
806 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
807 bool asReturnValue) const override {
808 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
809 }
810
811 bool isSwiftErrorInRegister() const override {
812 return false;
813 }
814};
815
816class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
817public:
818 explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
819 WebAssemblyABIInfo::ABIKind K)
820 : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
821
822 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
823 CodeGen::CodeGenModule &CGM) const override {
824 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
825 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
826 if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
827 llvm::Function *Fn = cast<llvm::Function>(GV);
828 llvm::AttrBuilder B;
829 B.addAttribute("wasm-import-module", Attr->getImportModule());
830 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
831 }
832 if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
833 llvm::Function *Fn = cast<llvm::Function>(GV);
834 llvm::AttrBuilder B;
835 B.addAttribute("wasm-import-name", Attr->getImportName());
836 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
837 }
838 if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
839 llvm::Function *Fn = cast<llvm::Function>(GV);
840 llvm::AttrBuilder B;
841 B.addAttribute("wasm-export-name", Attr->getExportName());
842 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
843 }
844 }
845
846 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
847 llvm::Function *Fn = cast<llvm::Function>(GV);
848 if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
849 Fn->addFnAttr("no-prototype");
850 }
851 }
852};
853
854/// Classify argument of given type \p Ty.
855ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
856 Ty = useFirstFieldIfTransparentUnion(Ty);
857
858 if (isAggregateTypeForABI(Ty)) {
859 // Records with non-trivial destructors/copy-constructors should not be
860 // passed by value.
861 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
862 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
863 // Ignore empty structs/unions.
864 if (isEmptyRecord(getContext(), Ty, true))
865 return ABIArgInfo::getIgnore();
866 // Lower single-element structs to just pass a regular value. TODO: We
867 // could do reasonable-size multiple-element structs too, using getExpand(),
868 // though watch out for things like bitfields.
869 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
870 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
871 // For the experimental multivalue ABI, fully expand all other aggregates
872 if (Kind == ABIKind::ExperimentalMV) {
873 const RecordType *RT = Ty->getAs<RecordType>();
874 assert(RT);
875 bool HasBitField = false;
876 for (auto *Field : RT->getDecl()->fields()) {
877 if (Field->isBitField()) {
878 HasBitField = true;
879 break;
880 }
881 }
882 if (!HasBitField)
883 return ABIArgInfo::getExpand();
884 }
885 }
886
887 // Otherwise just do the default thing.
888 return defaultInfo.classifyArgumentType(Ty);
889}
890
891ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
892 if (isAggregateTypeForABI(RetTy)) {
893 // Records with non-trivial destructors/copy-constructors should not be
894 // returned by value.
895 if (!getRecordArgABI(RetTy, getCXXABI())) {
896 // Ignore empty structs/unions.
897 if (isEmptyRecord(getContext(), RetTy, true))
898 return ABIArgInfo::getIgnore();
899 // Lower single-element structs to just return a regular value. TODO: We
900 // could do reasonable-size multiple-element structs too, using
901 // ABIArgInfo::getDirect().
902 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
903 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
904 // For the experimental multivalue ABI, return all other aggregates
905 if (Kind == ABIKind::ExperimentalMV)
906 return ABIArgInfo::getDirect();
907 }
908 }
909
910 // Otherwise just do the default thing.
911 return defaultInfo.classifyReturnType(RetTy);
912}
913
914Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
915 QualType Ty) const {
916 bool IsIndirect = isAggregateTypeForABI(Ty) &&
917 !isEmptyRecord(getContext(), Ty, true) &&
918 !isSingleElementStruct(Ty, getContext());
919 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
920 getContext().getTypeInfoInChars(Ty),
921 CharUnits::fromQuantity(4),
922 /*AllowHigherAlign=*/true);
923}
924
925//===----------------------------------------------------------------------===//
926// le32/PNaCl bitcode ABI Implementation
927//
928// This is a simplified version of the x86_32 ABI. Arguments and return values
929// are always passed on the stack.
930//===----------------------------------------------------------------------===//
931
932class PNaClABIInfo : public ABIInfo {
933 public:
934 PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
935
936 ABIArgInfo classifyReturnType(QualType RetTy) const;
937 ABIArgInfo classifyArgumentType(QualType RetTy) const;
938
939 void computeInfo(CGFunctionInfo &FI) const override;
940 Address EmitVAArg(CodeGenFunction &CGF,
941 Address VAListAddr, QualType Ty) const override;
942};
943
944class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
945 public:
946 PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
947 : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
948};
949
950void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
951 if (!getCXXABI().classifyReturnType(FI))
952 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
953
954 for (auto &I : FI.arguments())
955 I.info = classifyArgumentType(I.type);
956}
957
958Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
959 QualType Ty) const {
960 // The PNaCL ABI is a bit odd, in that varargs don't use normal
961 // function classification. Structs get passed directly for varargs
962 // functions, through a rewriting transform in
963 // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
964 // this target to actually support a va_arg instructions with an
965 // aggregate type, unlike other targets.
966 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
967}
968
969/// Classify argument of given type \p Ty.
970ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
971 if (isAggregateTypeForABI(Ty)) {
972 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
973 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
974 return getNaturalAlignIndirect(Ty);
975 } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
976 // Treat an enum type as its underlying type.
977 Ty = EnumTy->getDecl()->getIntegerType();
978 } else if (Ty->isFloatingType()) {
979 // Floating-point types don't go inreg.
980 return ABIArgInfo::getDirect();
981 } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
982 // Treat extended integers as integers if <=64, otherwise pass indirectly.
983 if (EIT->getNumBits() > 64)
984 return getNaturalAlignIndirect(Ty);
985 return ABIArgInfo::getDirect();
986 }
987
988 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
989 : ABIArgInfo::getDirect());
990}
991
992ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
993 if (RetTy->isVoidType())
994 return ABIArgInfo::getIgnore();
995
996 // In the PNaCl ABI we always return records/structures on the stack.
997 if (isAggregateTypeForABI(RetTy))
998 return getNaturalAlignIndirect(RetTy);
999
1000 // Treat extended integers as integers if <=64, otherwise pass indirectly.
1001 if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
1002 if (EIT->getNumBits() > 64)
1003 return getNaturalAlignIndirect(RetTy);
1004 return ABIArgInfo::getDirect();
1005 }
1006
1007 // Treat an enum type as its underlying type.
1008 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1009 RetTy = EnumTy->getDecl()->getIntegerType();
1010
1011 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1012 : ABIArgInfo::getDirect());
1013}
1014
1015/// IsX86_MMXType - Return true if this is an MMX type.
1016bool IsX86_MMXType(llvm::Type *IRType) {
1017 // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
1018 return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1019 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1020 IRType->getScalarSizeInBits() != 64;
1021}
1022
1023static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1024 StringRef Constraint,
1025 llvm::Type* Ty) {
1026 bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1027 .Cases("y", "&y", "^Ym", true)
1028 .Default(false);
1029 if (IsMMXCons && Ty->isVectorTy()) {
1030 if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1031 64) {
1032 // Invalid MMX constraint
1033 return nullptr;
1034 }
1035
1036 return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
1037 }
1038
1039 // No operation needed
1040 return Ty;
1041}
1042
1043/// Returns true if this type can be passed in SSE registers with the
1044/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1045static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
1046 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1047 if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1048 if (BT->getKind() == BuiltinType::LongDouble) {
1049 if (&Context.getTargetInfo().getLongDoubleFormat() ==
1050 &llvm::APFloat::x87DoubleExtended())
1051 return false;
1052 }
1053 return true;
1054 }
1055 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
1056 // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
1057 // registers specially.
1058 unsigned VecSize = Context.getTypeSize(VT);
1059 if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1060 return true;
1061 }
1062 return false;
1063}
1064
1065/// Returns true if this aggregate is small enough to be passed in SSE registers
1066/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1067static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1068 return NumMembers <= 4;
1069}
1070
1071/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
1072static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1073 auto AI = ABIArgInfo::getDirect(T);
1074 AI.setInReg(true);
1075 AI.setCanBeFlattened(false);
1076 return AI;
1077}
1078
1079//===----------------------------------------------------------------------===//
1080// X86-32 ABI Implementation
1081//===----------------------------------------------------------------------===//
1082
1083/// Similar to llvm::CCState, but for Clang.
1084struct CCState {
1085 CCState(CGFunctionInfo &FI)
1086 : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1087
1088 llvm::SmallBitVector IsPreassigned;
1089 unsigned CC = CallingConv::CC_C;
1090 unsigned FreeRegs = 0;
1091 unsigned FreeSSERegs = 0;
1092};
1093
1094/// X86_32ABIInfo - The X86-32 ABI information.
1095class X86_32ABIInfo : public SwiftABIInfo {
1096 enum Class {
1097 Integer,
1098 Float
1099 };
1100
1101 static const unsigned MinABIStackAlignInBytes = 4;
1102
1103 bool IsDarwinVectorABI;
1104 bool IsRetSmallStructInRegABI;
1105 bool IsWin32StructABI;
1106 bool IsSoftFloatABI;
1107 bool IsMCUABI;
1108 bool IsLinuxABI;
1109 unsigned DefaultNumRegisterParameters;
1110
1111 static bool isRegisterSize(unsigned Size) {
1112 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1113 }
1114
1115 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1116 // FIXME: Assumes vectorcall is in use.
1117 return isX86VectorTypeForVectorCall(getContext(), Ty);
1118 }
1119
1120 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1121 uint64_t NumMembers) const override {
1122 // FIXME: Assumes vectorcall is in use.
1123 return isX86VectorCallAggregateSmallEnough(NumMembers);
1124 }
1125
1126 bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1127
1128 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1129 /// such that the argument will be passed in memory.
1130 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1131
1132 ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1133
1134 /// Return the alignment to use for the given type on the stack.
1135 unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1136
1137 Class classify(QualType Ty) const;
1138 ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1139 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1140
1141 /// Updates the number of available free registers, returns
1142 /// true if any registers were allocated.
1143 bool updateFreeRegs(QualType Ty, CCState &State) const;
1144
1145 bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1146 bool &NeedsPadding) const;
1147 bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1148
1149 bool canExpandIndirectArgument(QualType Ty) const;
1150
1151 /// Rewrite the function info so that all memory arguments use
1152 /// inalloca.
1153 void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1154
1155 void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1156 CharUnits &StackOffset, ABIArgInfo &Info,
1157 QualType Type) const;
1158 void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1159
1160public:
1161
1162 void computeInfo(CGFunctionInfo &FI) const override;
1163 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1164 QualType Ty) const override;
1165
1166 X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1167 bool RetSmallStructInRegABI, bool Win32StructABI,
1168 unsigned NumRegisterParameters, bool SoftFloatABI)
1169 : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1170 IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1171 IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
1172 IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1173 IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()),
1174 DefaultNumRegisterParameters(NumRegisterParameters) {}
1175
1176 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1177 bool asReturnValue) const override {
1178 // LLVM's x86-32 lowering currently only assigns up to three
1179 // integer registers and three fp registers. Oddly, it'll use up to
1180 // four vector registers for vectors, but those can overlap with the
1181 // scalar registers.
1182 return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1183 }
1184
1185 bool isSwiftErrorInRegister() const override {
1186 // x86-32 lowering does not support passing swifterror in a register.
1187 return false;
1188 }
1189};
1190
1191class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1192public:
1193 X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1194 bool RetSmallStructInRegABI, bool Win32StructABI,
1195 unsigned NumRegisterParameters, bool SoftFloatABI)
1196 : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
1197 CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1198 NumRegisterParameters, SoftFloatABI)) {}
1199
1200 static bool isStructReturnInRegABI(
1201 const llvm::Triple &Triple, const CodeGenOptions &Opts);
1202
1203 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1204 CodeGen::CodeGenModule &CGM) const override;
1205
1206 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1207 // Darwin uses different dwarf register numbers for EH.
1208 if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1209 return 4;
1210 }
1211
1212 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1213 llvm::Value *Address) const override;
1214
1215 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1216 StringRef Constraint,
1217 llvm::Type* Ty) const override {
1218 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1219 }
1220
1221 void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1222 std::string &Constraints,
1223 std::vector<llvm::Type *> &ResultRegTypes,
1224 std::vector<llvm::Type *> &ResultTruncRegTypes,
1225 std::vector<LValue> &ResultRegDests,
1226 std::string &AsmString,
1227 unsigned NumOutputs) const override;
1228
1229 llvm::Constant *
1230 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1231 unsigned Sig = (0xeb << 0) | // jmp rel8
1232 (0x06 << 8) | // .+0x08
1233 ('v' << 16) |
1234 ('2' << 24);
1235 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1236 }
1237
1238 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1239 return "movl\t%ebp, %ebp"
1240 "\t\t// marker for objc_retainAutoreleaseReturnValue";
1241 }
1242};
1243
1244}
1245
1246/// Rewrite input constraint references after adding some output constraints.
1247/// In the case where there is one output and one input and we add one output,
1248/// we need to replace all operand references greater than or equal to 1:
1249/// mov $0, $1
1250/// mov eax, $1
1251/// The result will be:
1252/// mov $0, $2
1253/// mov eax, $2
1254static void rewriteInputConstraintReferences(unsigned FirstIn,
1255 unsigned NumNewOuts,
1256 std::string &AsmString) {
1257 std::string Buf;
1258 llvm::raw_string_ostream OS(Buf);
1259 size_t Pos = 0;
1260 while (Pos < AsmString.size()) {
1261 size_t DollarStart = AsmString.find('$', Pos);
1262 if (DollarStart == std::string::npos)
1263 DollarStart = AsmString.size();
1264 size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1265 if (DollarEnd == std::string::npos)
1266 DollarEnd = AsmString.size();
1267 OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1268 Pos = DollarEnd;
1269 size_t NumDollars = DollarEnd - DollarStart;
1270 if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1271 // We have an operand reference.
1272 size_t DigitStart = Pos;
1273 if (AsmString[DigitStart] == '{') {
1274 OS << '{';
1275 ++DigitStart;
1276 }
1277 size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1278 if (DigitEnd == std::string::npos)
1279 DigitEnd = AsmString.size();
1280 StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1281 unsigned OperandIndex;
1282 if (!OperandStr.getAsInteger(10, OperandIndex)) {
1283 if (OperandIndex >= FirstIn)
1284 OperandIndex += NumNewOuts;
1285 OS << OperandIndex;
1286 } else {
1287 OS << OperandStr;
1288 }
1289 Pos = DigitEnd;
1290 }
1291 }
1292 AsmString = std::move(OS.str());
1293}
1294
1295/// Add output constraints for EAX:EDX because they are return registers.
1296void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1297 CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1298 std::vector<llvm::Type *> &ResultRegTypes,
1299 std::vector<llvm::Type *> &ResultTruncRegTypes,
1300 std::vector<LValue> &ResultRegDests, std::string &AsmString,
1301 unsigned NumOutputs) const {
1302 uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1303
1304 // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1305 // larger.
1306 if (!Constraints.empty())
1307 Constraints += ',';
1308 if (RetWidth <= 32) {
1309 Constraints += "={eax}";
1310 ResultRegTypes.push_back(CGF.Int32Ty);
1311 } else {
1312 // Use the 'A' constraint for EAX:EDX.
1313 Constraints += "=A";
1314 ResultRegTypes.push_back(CGF.Int64Ty);
1315 }
1316
1317 // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1318 llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1319 ResultTruncRegTypes.push_back(CoerceTy);
1320
1321 // Coerce the integer by bitcasting the return slot pointer.
1322 ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
1323 CoerceTy->getPointerTo()));
1324 ResultRegDests.push_back(ReturnSlot);
1325
1326 rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1327}
1328
1329/// shouldReturnTypeInRegister - Determine if the given type should be
1330/// returned in a register (for the Darwin and MCU ABI).
1331bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1332 ASTContext &Context) const {
1333 uint64_t Size = Context.getTypeSize(Ty);
1334
1335 // For i386, type must be register sized.
1336 // For the MCU ABI, it only needs to be <= 8-byte
1337 if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1338 return false;
1339
1340 if (Ty->isVectorType()) {
1341 // 64- and 128- bit vectors inside structures are not returned in
1342 // registers.
1343 if (Size == 64 || Size == 128)
1344 return false;
1345
1346 return true;
1347 }
1348
1349 // If this is a builtin, pointer, enum, complex type, member pointer, or
1350 // member function pointer it is ok.
1351 if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1352 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1353 Ty->isBlockPointerType() || Ty->isMemberPointerType())
1354 return true;
1355
1356 // Arrays are treated like records.
1357 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1358 return shouldReturnTypeInRegister(AT->getElementType(), Context);
1359
1360 // Otherwise, it must be a record type.
1361 const RecordType *RT = Ty->getAs<RecordType>();
1362 if (!RT) return false;
1363
1364 // FIXME: Traverse bases here too.
1365
1366 // Structure types are passed in register if all fields would be
1367 // passed in a register.
1368 for (const auto *FD : RT->getDecl()->fields()) {
1369 // Empty fields are ignored.
1370 if (isEmptyField(Context, FD, true))
1371 continue;
1372
1373 // Check fields recursively.
1374 if (!shouldReturnTypeInRegister(FD->getType(), Context))
1375 return false;
1376 }
1377 return true;
1378}
1379
1380static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1381 // Treat complex types as the element type.
1382 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1383 Ty = CTy->getElementType();
1384
1385 // Check for a type which we know has a simple scalar argument-passing
1386 // convention without any padding. (We're specifically looking for 32
1387 // and 64-bit integer and integer-equivalents, float, and double.)
1388 if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1389 !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1390 return false;
1391
1392 uint64_t Size = Context.getTypeSize(Ty);
1393 return Size == 32 || Size == 64;
1394}
1395
1396static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1397 uint64_t &Size) {
1398 for (const auto *FD : RD->fields()) {
1399 // Scalar arguments on the stack get 4 byte alignment on x86. If the
1400 // argument is smaller than 32-bits, expanding the struct will create
1401 // alignment padding.
1402 if (!is32Or64BitBasicType(FD->getType(), Context))
1403 return false;
1404
1405 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1406 // how to expand them yet, and the predicate for telling if a bitfield still
1407 // counts as "basic" is more complicated than what we were doing previously.
1408 if (FD->isBitField())
1409 return false;
1410
1411 Size += Context.getTypeSize(FD->getType());
1412 }
1413 return true;
1414}
1415
1416static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1417 uint64_t &Size) {
1418 // Don't do this if there are any non-empty bases.
1419 for (const CXXBaseSpecifier &Base : RD->bases()) {
1420 if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1421 Size))
1422 return false;
1423 }
1424 if (!addFieldSizes(Context, RD, Size))
1425 return false;
1426 return true;
1427}
1428
1429/// Test whether an argument type which is to be passed indirectly (on the
1430/// stack) would have the equivalent layout if it was expanded into separate
1431/// arguments. If so, we prefer to do the latter to avoid inhibiting
1432/// optimizations.
1433bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1434 // We can only expand structure types.
1435 const RecordType *RT = Ty->getAs<RecordType>();
1436 if (!RT)
1437 return false;
1438 const RecordDecl *RD = RT->getDecl();
1439 uint64_t Size = 0;
1440 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1441 if (!IsWin32StructABI) {
1442 // On non-Windows, we have to conservatively match our old bitcode
1443 // prototypes in order to be ABI-compatible at the bitcode level.
1444 if (!CXXRD->isCLike())
1445 return false;
1446 } else {
1447 // Don't do this for dynamic classes.
1448 if (CXXRD->isDynamicClass())
1449 return false;
1450 }
1451 if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1452 return false;
1453 } else {
1454 if (!addFieldSizes(getContext(), RD, Size))
1455 return false;
1456 }
1457
1458 // We can do this if there was no alignment padding.
1459 return Size == getContext().getTypeSize(Ty);
1460}
1461
1462ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1463 // If the return value is indirect, then the hidden argument is consuming one
1464 // integer register.
1465 if (State.FreeRegs) {
1466 --State.FreeRegs;
1467 if (!IsMCUABI)
1468 return getNaturalAlignIndirectInReg(RetTy);
1469 }
1470 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1471}
1472
1473ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1474 CCState &State) const {
1475 if (RetTy->isVoidType())
1476 return ABIArgInfo::getIgnore();
1477
1478 const Type *Base = nullptr;
1479 uint64_t NumElts = 0;
1480 if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1481 State.CC == llvm::CallingConv::X86_RegCall) &&
1482 isHomogeneousAggregate(RetTy, Base, NumElts)) {
1483 // The LLVM struct type for such an aggregate should lower properly.
1484 return ABIArgInfo::getDirect();
1485 }
1486
1487 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1488 // On Darwin, some vectors are returned in registers.
1489 if (IsDarwinVectorABI) {
1490 uint64_t Size = getContext().getTypeSize(RetTy);
1491
1492 // 128-bit vectors are a special case; they are returned in
1493 // registers and we need to make sure to pick a type the LLVM
1494 // backend will like.
1495 if (Size == 128)
1496 return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1497 llvm::Type::getInt64Ty(getVMContext()), 2));
1498
1499 // Always return in register if it fits in a general purpose
1500 // register, or if it is 64 bits and has a single element.
1501 if ((Size == 8 || Size == 16 || Size == 32) ||
1502 (Size == 64 && VT->getNumElements() == 1))
1503 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1504 Size));
1505
1506 return getIndirectReturnResult(RetTy, State);
1507 }
1508
1509 return ABIArgInfo::getDirect();
1510 }
1511
1512 if (isAggregateTypeForABI(RetTy)) {
1513 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1514 // Structures with flexible arrays are always indirect.
1515 if (RT->getDecl()->hasFlexibleArrayMember())
1516 return getIndirectReturnResult(RetTy, State);
1517 }
1518
1519 // If specified, structs and unions are always indirect.
1520 if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1521 return getIndirectReturnResult(RetTy, State);
1522
1523 // Ignore empty structs/unions.
1524 if (isEmptyRecord(getContext(), RetTy, true))
1525 return ABIArgInfo::getIgnore();
1526
1527 // Small structures which are register sized are generally returned
1528 // in a register.
1529 if (shouldReturnTypeInRegister(RetTy, getContext())) {
1530 uint64_t Size = getContext().getTypeSize(RetTy);
1531
1532 // As a special-case, if the struct is a "single-element" struct, and
1533 // the field is of type "float" or "double", return it in a
1534 // floating-point register. (MSVC does not apply this special case.)
1535 // We apply a similar transformation for pointer types to improve the
1536 // quality of the generated IR.
1537 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1538 if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1539 || SeltTy->hasPointerRepresentation())
1540 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1541
1542 // FIXME: We should be able to narrow this integer in cases with dead
1543 // padding.
1544 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1545 }
1546
1547 return getIndirectReturnResult(RetTy, State);
1548 }
1549
1550 // Treat an enum type as its underlying type.
1551 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1552 RetTy = EnumTy->getDecl()->getIntegerType();
1553
1554 if (const auto *EIT = RetTy->getAs<ExtIntType>())
1555 if (EIT->getNumBits() > 64)
1556 return getIndirectReturnResult(RetTy, State);
1557
1558 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1559 : ABIArgInfo::getDirect());
1560}
1561
1562static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1563 return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1564}
1565
1566static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1567 const RecordType *RT = Ty->getAs<RecordType>();
1568 if (!RT)
1569 return 0;
1570 const RecordDecl *RD = RT->getDecl();
1571
1572 // If this is a C++ record, check the bases first.
1573 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1574 for (const auto &I : CXXRD->bases())
1575 if (!isRecordWithSIMDVectorType(Context, I.getType()))
1576 return false;
1577
1578 for (const auto *i : RD->fields()) {
1579 QualType FT = i->getType();
1580
1581 if (isSIMDVectorType(Context, FT))
1582 return true;
1583
1584 if (isRecordWithSIMDVectorType(Context, FT))
1585 return true;
1586 }
1587
1588 return false;
1589}
1590
1591unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1592 unsigned Align) const {
1593 // Otherwise, if the alignment is less than or equal to the minimum ABI
1594 // alignment, just use the default; the backend will handle this.
1595 if (Align <= MinABIStackAlignInBytes)
1596 return 0; // Use default alignment.
1597
1598 if (IsLinuxABI) {
1599 // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
1600 // want to spend any effort dealing with the ramifications of ABI breaks.
1601 //
1602 // If the vector type is __m128/__m256/__m512, return the default alignment.
1603 if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
1604 return Align;
1605 }
1606 // On non-Darwin, the stack type alignment is always 4.
1607 if (!IsDarwinVectorABI) {
1608 // Set explicit alignment, since we may need to realign the top.
1609 return MinABIStackAlignInBytes;
1610 }
1611
1612 // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1613 if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1614 isRecordWithSIMDVectorType(getContext(), Ty)))
1615 return 16;
1616
1617 return MinABIStackAlignInBytes;
1618}
1619
1620ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1621 CCState &State) const {
1622 if (!ByVal) {
1623 if (State.FreeRegs) {
1624 --State.FreeRegs; // Non-byval indirects just use one pointer.
1625 if (!IsMCUABI)
1626 return getNaturalAlignIndirectInReg(Ty);
1627 }
1628 return getNaturalAlignIndirect(Ty, false);
1629 }
1630
1631 // Compute the byval alignment.
1632 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1633 unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1634 if (StackAlign == 0)
1635 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1636
1637 // If the stack alignment is less than the type alignment, realign the
1638 // argument.
1639 bool Realign = TypeAlign > StackAlign;
1640 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1641 /*ByVal=*/true, Realign);
1642}
1643
1644X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1645 const Type *T = isSingleElementStruct(Ty, getContext());
1646 if (!T)
1647 T = Ty.getTypePtr();
1648
1649 if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1650 BuiltinType::Kind K = BT->getKind();
1651 if (K == BuiltinType::Float || K == BuiltinType::Double)
1652 return Float;
1653 }
1654 return Integer;
1655}
1656
1657bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1658 if (!IsSoftFloatABI) {
1659 Class C = classify(Ty);
1660 if (C == Float)
1661 return false;
1662 }
1663
1664 unsigned Size = getContext().getTypeSize(Ty);
1665 unsigned SizeInRegs = (Size + 31) / 32;
1666
1667 if (SizeInRegs == 0)
1668 return false;
1669
1670 if (!IsMCUABI) {
1671 if (SizeInRegs > State.FreeRegs) {
1672 State.FreeRegs = 0;
1673 return false;
1674 }
1675 } else {
1676 // The MCU psABI allows passing parameters in-reg even if there are
1677 // earlier parameters that are passed on the stack. Also,
1678 // it does not allow passing >8-byte structs in-register,
1679 // even if there are 3 free registers available.
1680 if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1681 return false;
1682 }
1683
1684 State.FreeRegs -= SizeInRegs;
1685 return true;
1686}
1687
1688bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1689 bool &InReg,
1690 bool &NeedsPadding) const {
1691 // On Windows, aggregates other than HFAs are never passed in registers, and
1692 // they do not consume register slots. Homogenous floating-point aggregates
1693 // (HFAs) have already been dealt with at this point.
1694 if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1695 return false;
1696
1697 NeedsPadding = false;
1698 InReg = !IsMCUABI;
1699
1700 if (!updateFreeRegs(Ty, State))
1701 return false;
1702
1703 if (IsMCUABI)
1704 return true;
1705
1706 if (State.CC == llvm::CallingConv::X86_FastCall ||
1707 State.CC == llvm::CallingConv::X86_VectorCall ||
1708 State.CC == llvm::CallingConv::X86_RegCall) {
1709 if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1710 NeedsPadding = true;
1711
1712 return false;
1713 }
1714
1715 return true;
1716}
1717
1718bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1719 if (!updateFreeRegs(Ty, State))
1720 return false;
1721
1722 if (IsMCUABI)
1723 return false;
1724
1725 if (State.CC == llvm::CallingConv::X86_FastCall ||
1726 State.CC == llvm::CallingConv::X86_VectorCall ||
1727 State.CC == llvm::CallingConv::X86_RegCall) {
1728 if (getContext().getTypeSize(Ty) > 32)
1729 return false;
1730
1731 return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1732 Ty->isReferenceType());
1733 }
1734
1735 return true;
1736}
1737
1738void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1739 // Vectorcall x86 works subtly different than in x64, so the format is
1740 // a bit different than the x64 version. First, all vector types (not HVAs)
1741 // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1742 // This differs from the x64 implementation, where the first 6 by INDEX get
1743 // registers.
1744 // In the second pass over the arguments, HVAs are passed in the remaining
1745 // vector registers if possible, or indirectly by address. The address will be
1746 // passed in ECX/EDX if available. Any other arguments are passed according to
1747 // the usual fastcall rules.
1748 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1749 for (int I = 0, E = Args.size(); I < E; ++I) {
1750 const Type *Base = nullptr;
1751 uint64_t NumElts = 0;
1752 const QualType &Ty = Args[I].type;
1753 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1754 isHomogeneousAggregate(Ty, Base, NumElts)) {
1755 if (State.FreeSSERegs >= NumElts) {
1756 State.FreeSSERegs -= NumElts;
1757 Args[I].info = ABIArgInfo::getDirectInReg();
1758 State.IsPreassigned.set(I);
1759 }
1760 }
1761 }
1762}
1763
1764ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1765 CCState &State) const {
1766 // FIXME: Set alignment on indirect arguments.
1767 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1768 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1769 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1770
1771 Ty = useFirstFieldIfTransparentUnion(Ty);
1772 TypeInfo TI = getContext().getTypeInfo(Ty);
1773
1774 // Check with the C++ ABI first.
1775 const RecordType *RT = Ty->getAs<RecordType>();
1776 if (RT) {
1777 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1778 if (RAA == CGCXXABI::RAA_Indirect) {
1779 return getIndirectResult(Ty, false, State);
1780 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1781 // The field index doesn't matter, we'll fix it up later.
1782 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1783 }
1784 }
1785
1786 // Regcall uses the concept of a homogenous vector aggregate, similar
1787 // to other targets.
1788 const Type *Base = nullptr;
1789 uint64_t NumElts = 0;
1790 if ((IsRegCall || IsVectorCall) &&
1791 isHomogeneousAggregate(Ty, Base, NumElts)) {
1792 if (State.FreeSSERegs >= NumElts) {
1793 State.FreeSSERegs -= NumElts;
1794
1795 // Vectorcall passes HVAs directly and does not flatten them, but regcall
1796 // does.
1797 if (IsVectorCall)
1798 return getDirectX86Hva();
1799
1800 if (Ty->isBuiltinType() || Ty->isVectorType())
1801 return ABIArgInfo::getDirect();
1802 return ABIArgInfo::getExpand();
1803 }
1804 return getIndirectResult(Ty, /*ByVal=*/false, State);
1805 }
1806
1807 if (isAggregateTypeForABI(Ty)) {
1808 // Structures with flexible arrays are always indirect.
1809 // FIXME: This should not be byval!
1810 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1811 return getIndirectResult(Ty, true, State);
1812
1813 // Ignore empty structs/unions on non-Windows.
1814 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1815 return ABIArgInfo::getIgnore();
1816
1817 llvm::LLVMContext &LLVMContext = getVMContext();
1818 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1819 bool NeedsPadding = false;
1820 bool InReg;
1821 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1822 unsigned SizeInRegs = (TI.Width + 31) / 32;
1823 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1824 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1825 if (InReg)
1826 return ABIArgInfo::getDirectInReg(Result);
1827 else
1828 return ABIArgInfo::getDirect(Result);
1829 }
1830 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1831
1832 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1833 // added in MSVC 2015.
1834 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1835 return getIndirectResult(Ty, /*ByVal=*/false, State);
1836
1837 // Expand small (<= 128-bit) record types when we know that the stack layout
1838 // of those arguments will match the struct. This is important because the
1839 // LLVM backend isn't smart enough to remove byval, which inhibits many
1840 // optimizations.
1841 // Don't do this for the MCU if there are still free integer registers
1842 // (see X86_64 ABI for full explanation).
1843 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1844 canExpandIndirectArgument(Ty))
1845 return ABIArgInfo::getExpandWithPadding(
1846 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1847
1848 return getIndirectResult(Ty, true, State);
1849 }
1850
1851 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1852 // On Windows, vectors are passed directly if registers are available, or
1853 // indirectly if not. This avoids the need to align argument memory. Pass
1854 // user-defined vector types larger than 512 bits indirectly for simplicity.
1855 if (IsWin32StructABI) {
1856 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1857 --State.FreeSSERegs;
1858 return ABIArgInfo::getDirectInReg();
1859 }
1860 return getIndirectResult(Ty, /*ByVal=*/false, State);
1861 }
1862
1863 // On Darwin, some vectors are passed in memory, we handle this by passing
1864 // it as an i8/i16/i32/i64.
1865 if (IsDarwinVectorABI) {
1866 if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1867 (TI.Width == 64 && VT->getNumElements() == 1))
1868 return ABIArgInfo::getDirect(
1869 llvm::IntegerType::get(getVMContext(), TI.Width));
1870 }
1871
1872 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1873 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1874
1875 return ABIArgInfo::getDirect();
1876 }
1877
1878
1879 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1880 Ty = EnumTy->getDecl()->getIntegerType();
1881
1882 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1883
1884 if (isPromotableIntegerTypeForABI(Ty)) {
1885 if (InReg)
1886 return ABIArgInfo::getExtendInReg(Ty);
1887 return ABIArgInfo::getExtend(Ty);
1888 }
1889
1890 if (const auto * EIT = Ty->getAs<ExtIntType>()) {
1891 if (EIT->getNumBits() <= 64) {
1892 if (InReg)
1893 return ABIArgInfo::getDirectInReg();
1894 return ABIArgInfo::getDirect();
1895 }
1896 return getIndirectResult(Ty, /*ByVal=*/false, State);
1897 }
1898
1899 if (InReg)
1900 return ABIArgInfo::getDirectInReg();
1901 return ABIArgInfo::getDirect();
1902}
1903
1904void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1905 CCState State(FI);
1906 if (IsMCUABI)
1907 State.FreeRegs = 3;
1908 else if (State.CC == llvm::CallingConv::X86_FastCall) {
1909 State.FreeRegs = 2;
1910 State.FreeSSERegs = 3;
1911 } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1912 State.FreeRegs = 2;
1913 State.FreeSSERegs = 6;
1914 } else if (FI.getHasRegParm())
1915 State.FreeRegs = FI.getRegParm();
1916 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1917 State.FreeRegs = 5;
1918 State.FreeSSERegs = 8;
1919 } else if (IsWin32StructABI) {
1920 // Since MSVC 2015, the first three SSE vectors have been passed in
1921 // registers. The rest are passed indirectly.
1922 State.FreeRegs = DefaultNumRegisterParameters;
1923 State.FreeSSERegs = 3;
1924 } else
1925 State.FreeRegs = DefaultNumRegisterParameters;
1926
1927 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1928 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1929 } else if (FI.getReturnInfo().isIndirect()) {
1930 // The C++ ABI is not aware of register usage, so we have to check if the
1931 // return value was sret and put it in a register ourselves if appropriate.
1932 if (State.FreeRegs) {
1933 --State.FreeRegs; // The sret parameter consumes a register.
1934 if (!IsMCUABI)
1935 FI.getReturnInfo().setInReg(true);
1936 }
1937 }
1938
1939 // The chain argument effectively gives us another free register.
1940 if (FI.isChainCall())
1941 ++State.FreeRegs;
1942
1943 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1944 // arguments to XMM registers as available.
1945 if (State.CC == llvm::CallingConv::X86_VectorCall)
1946 runVectorCallFirstPass(FI, State);
1947
1948 bool UsedInAlloca = false;
1949 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1950 for (int I = 0, E = Args.size(); I < E; ++I) {
1951 // Skip arguments that have already been assigned.
1952 if (State.IsPreassigned.test(I))
1953 continue;
1954
1955 Args[I].info = classifyArgumentType(Args[I].type, State);
1956 UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
1957 }
1958
1959 // If we needed to use inalloca for any argument, do a second pass and rewrite
1960 // all the memory arguments to use inalloca.
1961 if (UsedInAlloca)
1962 rewriteWithInAlloca(FI);
1963}
1964
1965void
1966X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1967 CharUnits &StackOffset, ABIArgInfo &Info,
1968 QualType Type) const {
1969 // Arguments are always 4-byte-aligned.
1970 CharUnits WordSize = CharUnits::fromQuantity(4);
1971 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1972
1973 // sret pointers and indirect things will require an extra pointer
1974 // indirection, unless they are byval. Most things are byval, and will not
1975 // require this indirection.
1976 bool IsIndirect = false;
1977 if (Info.isIndirect() && !Info.getIndirectByVal())
1978 IsIndirect = true;
1979 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1980 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
1981 if (IsIndirect)
1982 LLTy = LLTy->getPointerTo(0);
1983 FrameFields.push_back(LLTy);
1984 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1985
1986 // Insert padding bytes to respect alignment.
1987 CharUnits FieldEnd = StackOffset;
1988 StackOffset = FieldEnd.alignTo(WordSize);
1989 if (StackOffset != FieldEnd) {
1990 CharUnits NumBytes = StackOffset - FieldEnd;
1991 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1992 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1993 FrameFields.push_back(Ty);
1994 }
1995}
1996
1997static bool isArgInAlloca(const ABIArgInfo &Info) {
1998 // Leave ignored and inreg arguments alone.
1999 switch (Info.getKind()) {
2000 case ABIArgInfo::InAlloca:
2001 return true;
2002 case ABIArgInfo::Ignore:
2003 case ABIArgInfo::IndirectAliased:
2004 return false;
2005 case ABIArgInfo::Indirect:
2006 case ABIArgInfo::Direct:
2007 case ABIArgInfo::Extend:
2008 return !Info.getInReg();
2009 case ABIArgInfo::Expand:
2010 case ABIArgInfo::CoerceAndExpand:
2011 // These are aggregate types which are never passed in registers when
2012 // inalloca is involved.
2013 return true;
2014 }
2015 llvm_unreachable("invalid enum");
2016}
2017
2018void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2019 assert(IsWin32StructABI && "inalloca only supported on win32");
2020
2021 // Build a packed struct type for all of the arguments in memory.
2022 SmallVector<llvm::Type *, 6> FrameFields;
2023
2024 // The stack alignment is always 4.
2025 CharUnits StackAlign = CharUnits::fromQuantity(4);
2026
2027 CharUnits StackOffset;
2028 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2029
2030 // Put 'this' into the struct before 'sret', if necessary.
2031 bool IsThisCall =
2032 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2033 ABIArgInfo &Ret = FI.getReturnInfo();
2034 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2035 isArgInAlloca(I->info)) {
2036 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2037 ++I;
2038 }
2039
2040 // Put the sret parameter into the inalloca struct if it's in memory.
2041 if (Ret.isIndirect() && !Ret.getInReg()) {
2042 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2043 // On Windows, the hidden sret parameter is always returned in eax.
2044 Ret.setInAllocaSRet(IsWin32StructABI);
2045 }
2046
2047 // Skip the 'this' parameter in ecx.
2048 if (IsThisCall)
2049 ++I;
2050
2051 // Put arguments passed in memory into the struct.
2052 for (; I != E; ++I) {
2053 if (isArgInAlloca(I->info))
2054 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2055 }
2056
2057 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2058 /*isPacked=*/true),
2059 StackAlign);
2060}
2061
2062Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2063 Address VAListAddr, QualType Ty) const {
2064
2065 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2066
2067 // x86-32 changes the alignment of certain arguments on the stack.
2068 //
2069 // Just messing with TypeInfo like this works because we never pass
2070 // anything indirectly.
2071 TypeInfo.Align = CharUnits::fromQuantity(
2072 getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2073
2074 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2075 TypeInfo, CharUnits::fromQuantity(4),
2076 /*AllowHigherAlign*/ true);
2077}
2078
2079bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2080 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2081 assert(Triple.getArch() == llvm::Triple::x86);
2082
2083 switch (Opts.getStructReturnConvention()) {
2084 case CodeGenOptions::SRCK_Default:
2085 break;
2086 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2087 return false;
2088 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2089 return true;
2090 }
2091
2092 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2093 return true;
2094
2095 switch (Triple.getOS()) {
2096 case llvm::Triple::DragonFly:
2097 case llvm::Triple::FreeBSD:
2098 case llvm::Triple::OpenBSD:
2099 case llvm::Triple::Win32:
2100 return true;
2101 default:
2102 return false;
2103 }
2104}
2105
2106static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
2107 CodeGen::CodeGenModule &CGM) {
2108 if (!FD->hasAttr<AnyX86InterruptAttr>())
2109 return;
2110
2111 llvm::Function *Fn = cast<llvm::Function>(GV);
2112 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2113 if (FD->getNumParams() == 0)
2114 return;
2115
2116 auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
2117 llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
2118 llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
2119 Fn->getContext(), ByValTy);
2120 Fn->addParamAttr(0, NewAttr);
2121}
2122
2123void X86_32TargetCodeGenInfo::setTargetAttributes(
2124 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2125 if (GV->isDeclaration())
2126 return;
2127 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2128 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2129 llvm::Function *Fn = cast<llvm::Function>(GV);
2130 Fn->addFnAttr("stackrealign");
2131 }
2132
2133 addX86InterruptAttrs(FD, GV, CGM);
2134 }
2135}
2136
2137bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2138 CodeGen::CodeGenFunction &CGF,
2139 llvm::Value *Address) const {
2140 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2141
2142 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2143
2144 // 0-7 are the eight integer registers; the order is different
2145 // on Darwin (for EH), but the range is the same.
2146 // 8 is %eip.
2147 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2148
2149 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2150 // 12-16 are st(0..4). Not sure why we stop at 4.
2151 // These have size 16, which is sizeof(long double) on
2152 // platforms with 8-byte alignment for that type.
2153 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2154 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2155
2156 } else {
2157 // 9 is %eflags, which doesn't get a size on Darwin for some
2158 // reason.
2159 Builder.CreateAlignedStore(
2160 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2161 CharUnits::One());
2162
2163 // 11-16 are st(0..5). Not sure why we stop at 5.
2164 // These have size 12, which is sizeof(long double) on
2165 // platforms with 4-byte alignment for that type.
2166 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2167 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2168 }
2169
2170 return false;
2171}
2172
2173//===----------------------------------------------------------------------===//
2174// X86-64 ABI Implementation
2175//===----------------------------------------------------------------------===//
2176
2177
2178namespace {
2179/// The AVX ABI level for X86 targets.
2180enum class X86AVXABILevel {
2181 None,
2182 AVX,
2183 AVX512
2184};
2185
2186/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
2187static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2188 switch (AVXLevel) {
2189 case X86AVXABILevel::AVX512:
2190 return 512;
2191 case X86AVXABILevel::AVX:
2192 return 256;
2193 case X86AVXABILevel::None:
2194 return 128;
2195 }
2196 llvm_unreachable("Unknown AVXLevel");
2197}
2198
2199/// X86_64ABIInfo - The X86_64 ABI information.
2200class X86_64ABIInfo : public SwiftABIInfo {
2201 enum Class {
2202 Integer = 0,
2203 SSE,
2204 SSEUp,
2205 X87,
2206 X87Up,
2207 ComplexX87,
2208 NoClass,
2209 Memory
2210 };
2211
2212 /// merge - Implement the X86_64 ABI merging algorithm.
2213 ///
2214 /// Merge an accumulating classification \arg Accum with a field
2215 /// classification \arg Field.
2216 ///
2217 /// \param Accum - The accumulating classification. This should
2218 /// always be either NoClass or the result of a previous merge
2219 /// call. In addition, this should never be Memory (the caller
2220 /// should just return Memory for the aggregate).
2221 static Class merge(Class Accum, Class Field);
2222
2223 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2224 ///
2225 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2226 /// final MEMORY or SSE classes when necessary.
2227 ///
2228 /// \param AggregateSize - The size of the current aggregate in
2229 /// the classification process.
2230 ///
2231 /// \param Lo - The classification for the parts of the type
2232 /// residing in the low word of the containing object.
2233 ///
2234 /// \param Hi - The classification for the parts of the type
2235 /// residing in the higher words of the containing object.
2236 ///
2237 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2238
2239 /// classify - Determine the x86_64 register classes in which the
2240 /// given type T should be passed.
2241 ///
2242 /// \param Lo - The classification for the parts of the type
2243 /// residing in the low word of the containing object.
2244 ///
2245 /// \param Hi - The classification for the parts of the type
2246 /// residing in the high word of the containing object.
2247 ///
2248 /// \param OffsetBase - The bit offset of this type in the
2249 /// containing object. Some parameters are classified different
2250 /// depending on whether they straddle an eightbyte boundary.
2251 ///
2252 /// \param isNamedArg - Whether the argument in question is a "named"
2253 /// argument, as used in AMD64-ABI 3.5.7.
2254 ///
2255 /// If a word is unused its result will be NoClass; if a type should
2256 /// be passed in Memory then at least the classification of \arg Lo
2257 /// will be Memory.
2258 ///
2259 /// The \arg Lo class will be NoClass iff the argument is ignored.
2260 ///
2261 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2262 /// also be ComplexX87.
2263 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2264 bool isNamedArg) const;
2265
2266 llvm::Type *GetByteVectorType(QualType Ty) const;
2267 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2268 unsigned IROffset, QualType SourceTy,
2269 unsigned SourceOffset) const;
2270 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2271 unsigned IROffset, QualType SourceTy,
2272 unsigned SourceOffset) const;
2273
2274 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
2275 /// such that the argument will be returned in memory.
2276 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2277
2278 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
2279 /// such that the argument will be passed in memory.
2280 ///
2281 /// \param freeIntRegs - The number of free integer registers remaining
2282 /// available.
2283 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2284
2285 ABIArgInfo classifyReturnType(QualType RetTy) const;
2286
2287 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2288 unsigned &neededInt, unsigned &neededSSE,
2289 bool isNamedArg) const;
2290
2291 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2292 unsigned &NeededSSE) const;
2293
2294 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2295 unsigned &NeededSSE) const;
2296
2297 bool IsIllegalVectorType(QualType Ty) const;
2298
2299 /// The 0.98 ABI revision clarified a lot of ambiguities,
2300 /// unfortunately in ways that were not always consistent with
2301 /// certain previous compilers. In particular, platforms which
2302 /// required strict binary compatibility with older versions of GCC
2303 /// may need to exempt themselves.
2304 bool honorsRevision0_98() const {
2305 return !getTarget().getTriple().isOSDarwin();
2306 }
2307
2308 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2309 /// classify it as INTEGER (for compatibility with older clang compilers).
2310 bool classifyIntegerMMXAsSSE() const {
2311 // Clang <= 3.8 did not do this.
2312 if (getContext().getLangOpts().getClangABICompat() <=
2313 LangOptions::ClangABI::Ver3_8)
2314 return false;
2315
2316 const llvm::Triple &Triple = getTarget().getTriple();
2317 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2318 return false;
2319 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2320 return false;
2321 return true;
2322 }
2323
2324 // GCC classifies vectors of __int128 as memory.
2325 bool passInt128VectorsInMem() const {
2326 // Clang <= 9.0 did not do this.
2327 if (getContext().getLangOpts().getClangABICompat() <=
2328 LangOptions::ClangABI::Ver9)
2329 return false;
2330
2331 const llvm::Triple &T = getTarget().getTriple();
2332 return T.isOSLinux() || T.isOSNetBSD();
2333 }
2334
2335 X86AVXABILevel AVXLevel;
2336 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2337 // 64-bit hardware.
2338 bool Has64BitPointers;
2339
2340public:
2341 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2342 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2343 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2344 }
2345
2346 bool isPassedUsingAVXType(QualType type) const {
2347 unsigned neededInt, neededSSE;
2348 // The freeIntRegs argument doesn't matter here.
2349 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2350 /*isNamedArg*/true);
2351 if (info.isDirect()) {
2352 llvm::Type *ty = info.getCoerceToType();
2353 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2354 return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2355 }
2356 return false;
2357 }
2358
2359 void computeInfo(CGFunctionInfo &FI) const override;
2360
2361 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2362 QualType Ty) const override;
2363 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2364 QualType Ty) const override;
2365
2366 bool has64BitPointers() const {
2367 return Has64BitPointers;
2368