//===- RISCV.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// RISC-V ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with the soft-float ABI has FLen==0).
  unsigned FLen;
  const int NumArgGPRs;
  const int NumArgFPRs;
  const bool EABI;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
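  // The standard calling conventions pass arguments in up to eight GPRs
  // (a0-a7) and, for hard-float ABIs, up to eight FPRs (fa0-fa7). The ILP32E
  // calling convention only has six argument GPRs (a0-a5).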
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen,
               bool EABI)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen), NumArgGPRs(EABI ? 6 : 8),
        NumArgFPRs(FLen != 0 ? 8 : 0), EABI(EABI) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;

  ABIArgInfo coerceVLSVector(QualType Ty) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyReturnType (which defers to
  // classifyArgumentType) indicated the value should be passed indirect, or
  // if the return type is a scalar larger than 2*XLen and not a complex type
  // with elements <= FLen. e.g. fp128 is passed direct in LLVM IR, relying on
  // the backend lowering code to rewrite the argument list and pass it
  // indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->castAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

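  // An indirect return consumes a0 as the implicit sret pointer, leaving one
  // fewer GPR for arguments.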
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = NumArgFPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
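//
// For example, under the LP64D ABI the following all flatten to eligible
// field sets:
//   struct { double d; }          // one fp field
//   struct { float a; float b; }  // fp + fp
//   struct { double d; int i; }   // fp + int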
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Handling of half
    // precision values has been specified in the ABI, so don't block those.
    if (IsFloat && Size > FLen)
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getZExtSize();
    QualType EltTy = ATy->getElementType();
    // Non-zero-length arrays of empty records make the struct ineligible for
    // the FP calling convention in C++.
    if (const auto *RTy = EltTy->getAs<RecordType>()) {
      if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
          isEmptyRecord(getContext(), EltTy, true, true))
        return false;
    }
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const CXXBaseSpecifier &B : CXXRD->bases()) {
        const auto *BDecl =
            cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
        CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
        bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
                                                  Field1Ty, Field1Off, Field2Ty,
                                                  Field2Off);
        if (!Ret)
          return false;
      }
    }
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
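      // For example, struct { float f; int : 0; } remains eligible, while
      // struct { float f; int : 0; float g; } does not.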
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}

// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
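//
// For example, struct { float x; float y; } needs two FPRs, while
// struct { double d; int32_t i; } needs one FPR and one GPR.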
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  if (!Field1Ty)
    return false;
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return true;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
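//
// For example, struct { float f; double d; } under LP64D yields coerceToType
// { float, double }; coerce-and-expand then passes the two fields as separate
// arguments (here in fa0 and fa1).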
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlign(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}

// Fixed-length RVV vectors are represented as scalable vectors in function
// args/return and must be coerced from fixed vectors.
ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
  assert(Ty->isVectorType() && "expected vector type!");

  const auto *VT = Ty->castAs<VectorType>();
  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");

  auto VScale =
      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());

  unsigned NumElts = VT->getNumElements();
  llvm::Type *EltType;
  if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
    NumElts *= 8;
    EltType = llvm::Type::getInt1Ty(getVMContext());
  } else {
    assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
           "Unexpected vector kind");
    EltType = CGT.ConvertType(VT->getElementType());
  }

  // MinNumElts is simplified from the equation:
  // NumElts / VScale =
  //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
  //    * (RVVBitsPerBlock / EltSize)
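  //
  // For example, with -mrvv-vector-bits=128 (so vscale is 2 when
  // RVVBitsPerBlock is 64), a fixed-length vector of 4 x i32 becomes
  // <vscale x 2 x i32>.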
  llvm::ScalableVectorType *ResType =
      llvm::ScalableVectorType::get(EltType, NumElts / VScale->first);
  return ABIArgInfo::getDirect(ResType);
}

ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  // TODO: To be compatible with GCC's behavior, we don't currently align
  // register pairs when using the ILP32E calling convention. This behavior
  // may be changed when RV32E/ILP32E is ratified.
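  //
  // For example, on RV32 a variadic double (2*XLen-aligned) is passed in an
  // even-odd register pair; if an odd number of GPRs remain, one register is
  // skipped, so the argument effectively consumes three.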
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (EABI && XLen == 32 ? 0 : (ArgGPRsLeft % 2));
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width.
    if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<BitIntType>()) {
      if (EIT->getNumBits() < XLen)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    ABIArgInfo Info = ABIArgInfo::getDirect();

    // If it is a tuple type, it can't be flattened.
    if (llvm::StructType *STy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty)))
      Info.setCanBeFlattened(!STy->containsHomogeneousScalableVectorTypes());

    return Info;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>())
    if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
        VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
      return coerceVLSVector(Ty);

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
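    // For example, on RV64: struct { int a; int b; } -> i64;
    // struct { __int128 v; } -> i128; struct { long a; long b; } -> [2 x i64].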
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

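  // Values are returned in up to two GPRs (a0/a1) and, for hard-float ABIs,
  // up to two FPRs (fa0/fa1).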
  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    return Address(CGF.Builder.CreateLoad(VAListAddr),
                   CGF.ConvertTypeForMem(Ty), SlotSize);
  }

  auto TInfo = getContext().getTypeInfoInChars(Ty);

  // TODO: To be compatible with GCC's behavior, we force arguments with
  // 2*XLen-bit alignment and size at most 2*XLen bits, like `long long`,
  // `unsigned long long` and `double`, to have 4-byte alignment. This
  // behavior may be changed when RV32E/ILP32E is ratified.
  if (EABI && XLen == 32)
    TInfo.Align = std::min(TInfo.Align, CharUnits::fromQuantity(4));

  // Arguments bigger than 2*XLen bits are passed indirectly.
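  // For example, on RV32 a 32-byte struct is passed by reference: va_arg
  // loads a pointer from the va_list and dereferences it.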
  bool IsIndirect = TInfo.Width > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TInfo,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
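  // This matches the hardware convention: RV64 keeps 32-bit values in
  // registers in sign-extended form (the *W instructions sign-extend their
  // results), so e.g. a uint32_t argument is passed sign-extended to 64 bits.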
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}

namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen, bool EABI)
      : TargetCodeGenInfo(
            std::make_unique<RISCVABIInfo>(CGT, XLen, FLen, EABI)) {
    SwiftInfo =
        std::make_unique<SwiftABIInfo>(CGT, /*SwiftErrorInRegister=*/false);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

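    // Map __attribute__((interrupt("machine"))) and
    // __attribute__((interrupt("supervisor"))) to the LLVM IR function
    // attribute "interrupt"="machine" / "supervisor".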
    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createRISCVTargetCodeGenInfo(CodeGenModule &CGM, unsigned XLen,
                                      unsigned FLen, bool EABI) {
  return std::make_unique<RISCVTargetCodeGenInfo>(CGM.getTypes(), XLen, FLen,
                                                  EABI);
}