//===- ARM.cpp ------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public ABIInfo {
  ARMABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ARMABIKind Kind) : ABIInfo(CGT), Kind(Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
                       CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return getTarget().getTriple().isOHOSFamily();
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ARMABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
};

class ARMSwiftABIInfo : public SwiftABIInfo {
public:
  explicit ARMSwiftABIInfo(CodeGenTypes &CGT)
      : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}

  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                         unsigned NumElts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {
    SwiftInfo = std::make_unique<ARMSwiftABIInfo>(CGT);
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo<ARMABIInfo>().isEABI())
      return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    auto *Fn = cast<llvm::Function>(GV);

    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr =
          CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef DiagMsg;
        StringRef Arch =
            Attr.CPU.empty() ? CGM.getTarget().getTargetOpts().CPU : Attr.CPU;
        if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                      Arch, BPI, DiagMsg)) {
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Arch;
        } else {
          Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
          Fn->addFnAttr("branch-target-enforcement",
                        BPI.BranchTargetEnforcement ? "true" : "false");
        }
      } else if (CGM.getLangOpts().BranchTargetEnforcement ||
                 CGM.getLangOpts().hasSignReturnAddress()) {
        // If the Branch Protection attribute is missing, validate the target
        // Architecture attribute against Branch Protection command line
        // settings.
        if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.CPU))
          CGM.getDiags().Report(
              D->getLocation(),
              diag::warn_target_unsupported_branch_protection_attribute)
              << Attr.CPU;
      }
    }

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    Fn->addFnAttr("interrupt", Kind);

    ARMABIKind ABI = getABIInfo<ARMABIInfo>().getABIKind();
    if (ABI == ARMABIKind::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B(Fn->getContext());
    B.addStackAlignmentAttr(8);
    Fn->addFnAttrs(B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // namespace

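/// Compute ABI information for the return value and each argument of \p FI,
/// then pick the effective calling convention: a user-specified convention is
/// always honored; otherwise the runtime CC chosen by setCCs() is applied.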
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case ARMABIKind::APCS:
    return llvm::CallingConv::ARM_APCS;
  case ARMABIKind::AAPCS:
    return llvm::CallingConv::ARM_AAPCS;
  case ARMABIKind::AAPCS_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  case ARMABIKind::AAPCS16_VFP:
    return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

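/// Coerce a vector type that the ABI cannot pass directly (see
/// isIllegalVectorType) into a legal representation: a single i32 for sizes
/// up to 32 bits, a vector of i32 for 64- and 128-bit sizes, and an indirect
/// pass for anything larger.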
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

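/// Classify a homogeneous aggregate of \p Members elements with base type
/// \p Base. FP16 vector members are converted to integer vectors when half is
/// not a legal type, and the AAPCS argument-alignment cap of 8 is applied.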
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, /*CanBeFlattened=*/false);
    }
  }
  unsigned Align = 0;
  if (getABIKind() == ARMABIKind::AAPCS ||
      getABIKind() == ARMABIKind::AAPCS_VFP) {
    // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
    // default otherwise.
    Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
    Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, /*CanBeFlattened=*/false,
                               Align);
}

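/// Classify a single argument type for the selected ARM ABI variant, choosing
/// between direct (possibly coerced), extended, indirect/byval, and ignored
/// representations.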
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
                                            /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, /*AllowArrays=*/true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, /*CanBeFlattened=*/false);
    }
  }

  if (getABIKind() == ARMABIKind::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8),
        /*ByVal=*/false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIKind::AAPCS_VFP ||
      getABIKind() == ARMABIKind::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::clamp(TyAlign, (uint64_t)4, (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIKind::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}

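/// Determine whether \p Ty is "integer like" in the APCS sense; such
/// structures can be returned in r0 instead of in memory.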
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}

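/// Classify the return type for the selected ARM ABI variant, applying the
/// APCS "integer like" rule or the AAPCS small-aggregate and homogeneous
/// aggregate rules as appropriate.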
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv,
                                            /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<BitIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == ARMABIKind::APCS) {
    if (isEmptyRecord(getContext(), RetTy, /*AllowArrays=*/false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, /*AllowArrays=*/true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == ARMABIKind::AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return true to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

/// Return true if a type contains any 16-bit floating point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getZExtSize();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}

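/// Check whether a vector is legal for direct passing under the Swift calling
/// convention on ARM: the element count must be a power of two, each element
/// must store to at most 64 bits, and the whole vector must be 8 bytes, or
/// 16 bytes with more than one element.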
bool ARMSwiftABIInfo::isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
                                        unsigned NumElts) const {
  if (!llvm::isPowerOf2_32(NumElts))
    return false;
  unsigned size = CGT.getDataLayout().getTypeStoreSizeInBits(EltTy);
  if (size > 64)
    return false;
  if (VectorSize.getQuantity() != 8 &&
      (VectorSize.getQuantity() != 16 || NumElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

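// AAPCS 6.1.2.1: a Homogeneous Aggregate has one to four Elements.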
bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
  // AAPCS32 says that the rule for whether something is a homogeneous
  // aggregate is applied to the output of the data layout decision. So
  // anything that doesn't affect the data layout also does not affect
  // homogeneity. In particular, zero-length bitfields don't stop a struct
  // being homogeneous.
  return true;
}

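/// Return whether the AAPCS-VFP rules are in effect: either the user
/// explicitly requested the ARM_AAPCS_VFP convention, or (for the default C
/// convention) the ABI kind is AAPCS_VFP, also accepting AAPCS16_VFP when
/// \p acceptHalf is set.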
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == ARMABIKind::AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == ARMABIKind::AAPCS16_VFP));
}

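/// Emit a va_arg access for \p Ty, bounding the type's alignment according to
/// the active ABI variant before loading the value from the va_list.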
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, /*AllowArrays=*/true)) {
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
    auto *Load = CGF.Builder.CreateLoad(VAListAddr);
    return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

    // ARMv7k passes structs bigger than 16 bytes indirectly, in space
    // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIKind::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

    // Otherwise, bound the type's ABI alignment.
    // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
    // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
    // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIKind::AAPCS_VFP ||
             getABIKind() == ARMABIKind::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIKind::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  TypeInfoChars TyInfo(TySize, TyAlignForABI, AlignRequirementKind::None);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo, SlotSize,
                          /*AllowHigherAlign*/ true);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind Kind) {
  return std::make_unique<ARMTargetCodeGenInfo>(CGM.getTypes(), Kind);
}

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createWindowsARMTargetCodeGenInfo(CodeGenModule &CGM, ARMABIKind K) {
  return std::make_unique<WindowsARMTargetCodeGenInfo>(CGM.getTypes(), K);
}