1//===-- RISCVCallLowering.cpp - Call lowering -------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the lowering of LLVM calls to machine code calls for
11/// GlobalISel.
12//
13//===----------------------------------------------------------------------===//
14
15#include "RISCVCallLowering.h"
16#include "RISCVISelLowering.h"
17#include "RISCVMachineFunctionInfo.h"
18#include "RISCVSubtarget.h"
19#include "llvm/CodeGen/Analysis.h"
20#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21#include "llvm/CodeGen/MachineFrameInfo.h"
22
23using namespace llvm;
24
25namespace {
26
27struct RISCVOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
28private:
29 // The function used internally to assign args - we ignore the AssignFn stored
30 // by OutgoingValueAssigner since RISC-V implements its CC using a custom
31 // function with a different signature.
32 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
33
34 // Whether this is assigning args for a return.
35 bool IsRet;
36
37 RVVArgDispatcher &RVVDispatcher;
38
39public:
40 RISCVOutgoingValueAssigner(
41 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
42 RVVArgDispatcher &RVVDispatcher)
43 : CallLowering::OutgoingValueAssigner(nullptr),
44 RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
45 RVVDispatcher(RVVDispatcher) {}
46
47 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
48 CCValAssign::LocInfo LocInfo,
49 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
50 CCState &State) override {
51 MachineFunction &MF = State.getMachineFunction();
52 const DataLayout &DL = MF.getDataLayout();
53 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
54
55 if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
56 LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
57 *Subtarget.getTargetLowering(), RVVDispatcher))
58 return true;
59
60 StackSize = State.getStackSize();
61 return false;
62 }
63};
64
65struct RISCVOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
66 RISCVOutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
67 MachineInstrBuilder MIB)
68 : OutgoingValueHandler(B, MRI), MIB(MIB),
69 Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
70 Register getStackAddress(uint64_t MemSize, int64_t Offset,
71 MachinePointerInfo &MPO,
72 ISD::ArgFlagsTy Flags) override {
73 MachineFunction &MF = MIRBuilder.getMF();
74 LLT p0 = LLT::pointer(AddressSpace: 0, SizeInBits: Subtarget.getXLen());
75 LLT sXLen = LLT::scalar(SizeInBits: Subtarget.getXLen());
76
77 if (!SPReg)
78 SPReg = MIRBuilder.buildCopy(p0, Register(RISCV::X2)).getReg(0);
79
80 auto OffsetReg = MIRBuilder.buildConstant(Res: sXLen, Val: Offset);
81
82 auto AddrReg = MIRBuilder.buildPtrAdd(Res: p0, Op0: SPReg, Op1: OffsetReg);
83
84 MPO = MachinePointerInfo::getStack(MF, Offset);
85 return AddrReg.getReg(Idx: 0);
86 }
87
88 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
89 const MachinePointerInfo &MPO,
90 const CCValAssign &VA) override {
91 MachineFunction &MF = MIRBuilder.getMF();
92 uint64_t LocMemOffset = VA.getLocMemOffset();
93
94 // TODO: Move StackAlignment to subtarget and share with FrameLowering.
95 auto MMO =
96 MF.getMachineMemOperand(PtrInfo: MPO, f: MachineMemOperand::MOStore, MemTy,
97 base_alignment: commonAlignment(A: Align(16), Offset: LocMemOffset));
98
99 Register ExtReg = extendRegister(ValReg: ValVReg, VA);
100 MIRBuilder.buildStore(Val: ExtReg, Addr, MMO&: *MMO);
101 }
102
103 void assignValueToReg(Register ValVReg, Register PhysReg,
104 const CCValAssign &VA) override {
105 // If we're passing an f32 value into an i64, anyextend before copying.
106 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
107 ValVReg = MIRBuilder.buildAnyExt(Res: LLT::scalar(SizeInBits: 64), Op: ValVReg).getReg(Idx: 0);
108
109 Register ExtReg = extendRegister(ValReg: ValVReg, VA);
110 MIRBuilder.buildCopy(Res: PhysReg, Op: ExtReg);
111 MIB.addUse(RegNo: PhysReg, Flags: RegState::Implicit);
112 }
113
114 unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
115 ArrayRef<CCValAssign> VAs,
116 std::function<void()> *Thunk) override {
117 assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
118 const CCValAssign &VALo = VAs[0];
119 const CCValAssign &VAHi = VAs[1];
120
121 assert(VAHi.needsCustom() && "Value doesn't need custom handling");
122 assert(VALo.getValNo() == VAHi.getValNo() &&
123 "Values belong to different arguments");
124
125 assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
126 VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
127 "unexpected custom value");
128
129 Register NewRegs[] = {MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32)),
130 MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32))};
131 MIRBuilder.buildUnmerge(Res: NewRegs, Op: Arg.Regs[0]);
132
133 if (VAHi.isMemLoc()) {
134 LLT MemTy(VAHi.getLocVT());
135
136 MachinePointerInfo MPO;
137 Register StackAddr = getStackAddress(
138 MemSize: MemTy.getSizeInBytes(), Offset: VAHi.getLocMemOffset(), MPO, Flags: Arg.Flags[0]);
139
140 assignValueToAddress(ValVReg: NewRegs[1], Addr: StackAddr, MemTy, MPO,
141 VA: const_cast<CCValAssign &>(VAHi));
142 }
143
144 auto assignFunc = [=]() {
145 assignValueToReg(ValVReg: NewRegs[0], PhysReg: VALo.getLocReg(), VA: VALo);
146 if (VAHi.isRegLoc())
147 assignValueToReg(ValVReg: NewRegs[1], PhysReg: VAHi.getLocReg(), VA: VAHi);
148 };
149
150 if (Thunk) {
151 *Thunk = assignFunc;
152 return 2;
153 }
154
155 assignFunc();
156 return 2;
157 }
158
159private:
160 MachineInstrBuilder MIB;
161
162 // Cache the SP register vreg if we need it more than once in this call site.
163 Register SPReg;
164
165 const RISCVSubtarget &Subtarget;
166};
167
168struct RISCVIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
169private:
170 // The function used internally to assign args - we ignore the AssignFn stored
171 // by IncomingValueAssigner since RISC-V implements its CC using a custom
172 // function with a different signature.
173 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn;
174
175 // Whether this is assigning args from a return.
176 bool IsRet;
177
178 RVVArgDispatcher &RVVDispatcher;
179
180public:
181 RISCVIncomingValueAssigner(
182 RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
183 RVVArgDispatcher &RVVDispatcher)
184 : CallLowering::IncomingValueAssigner(nullptr),
185 RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
186 RVVDispatcher(RVVDispatcher) {}
187
188 bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
189 CCValAssign::LocInfo LocInfo,
190 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
191 CCState &State) override {
192 MachineFunction &MF = State.getMachineFunction();
193 const DataLayout &DL = MF.getDataLayout();
194 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
195
196 if (LocVT.isScalableVector())
197 MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
198
199 if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
200 LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
201 *Subtarget.getTargetLowering(), RVVDispatcher))
202 return true;
203
204 StackSize = State.getStackSize();
205 return false;
206 }
207};
208
209struct RISCVIncomingValueHandler : public CallLowering::IncomingValueHandler {
210 RISCVIncomingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
211 : IncomingValueHandler(B, MRI),
212 Subtarget(MIRBuilder.getMF().getSubtarget<RISCVSubtarget>()) {}
213
214 Register getStackAddress(uint64_t MemSize, int64_t Offset,
215 MachinePointerInfo &MPO,
216 ISD::ArgFlagsTy Flags) override {
217 MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo();
218
219 int FI = MFI.CreateFixedObject(Size: MemSize, SPOffset: Offset, /*Immutable=*/IsImmutable: true);
220 MPO = MachinePointerInfo::getFixedStack(MF&: MIRBuilder.getMF(), FI);
221 return MIRBuilder.buildFrameIndex(Res: LLT::pointer(AddressSpace: 0, SizeInBits: Subtarget.getXLen()), Idx: FI)
222 .getReg(Idx: 0);
223 }
224
225 void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
226 const MachinePointerInfo &MPO,
227 const CCValAssign &VA) override {
228 MachineFunction &MF = MIRBuilder.getMF();
229 auto MMO = MF.getMachineMemOperand(PtrInfo: MPO, f: MachineMemOperand::MOLoad, MemTy,
230 base_alignment: inferAlignFromPtrInfo(MF, MPO));
231 MIRBuilder.buildLoad(Res: ValVReg, Addr, MMO&: *MMO);
232 }
233
234 void assignValueToReg(Register ValVReg, Register PhysReg,
235 const CCValAssign &VA) override {
236 markPhysRegUsed(PhysReg);
237 IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
238 }
239
240 unsigned assignCustomValue(CallLowering::ArgInfo &Arg,
241 ArrayRef<CCValAssign> VAs,
242 std::function<void()> *Thunk) override {
243 assert(VAs.size() >= 2 && "Expected at least 2 VAs.");
244 const CCValAssign &VALo = VAs[0];
245 const CCValAssign &VAHi = VAs[1];
246
247 assert(VAHi.needsCustom() && "Value doesn't need custom handling");
248 assert(VALo.getValNo() == VAHi.getValNo() &&
249 "Values belong to different arguments");
250
251 assert(VALo.getLocVT() == MVT::i32 && VAHi.getLocVT() == MVT::i32 &&
252 VALo.getValVT() == MVT::f64 && VAHi.getValVT() == MVT::f64 &&
253 "unexpected custom value");
254
255 Register NewRegs[] = {MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32)),
256 MRI.createGenericVirtualRegister(Ty: LLT::scalar(SizeInBits: 32))};
257
258 if (VAHi.isMemLoc()) {
259 LLT MemTy(VAHi.getLocVT());
260
261 MachinePointerInfo MPO;
262 Register StackAddr = getStackAddress(
263 MemSize: MemTy.getSizeInBytes(), Offset: VAHi.getLocMemOffset(), MPO, Flags: Arg.Flags[0]);
264
265 assignValueToAddress(ValVReg: NewRegs[1], Addr: StackAddr, MemTy, MPO,
266 VA: const_cast<CCValAssign &>(VAHi));
267 }
268
269 assignValueToReg(ValVReg: NewRegs[0], PhysReg: VALo.getLocReg(), VA: VALo);
270 if (VAHi.isRegLoc())
271 assignValueToReg(ValVReg: NewRegs[1], PhysReg: VAHi.getLocReg(), VA: VAHi);
272
273 MIRBuilder.buildMergeLikeInstr(Res: Arg.Regs[0], Ops: NewRegs);
274
275 return 2;
276 }
277
278 /// How the physical register gets marked varies between formal
279 /// parameters (it's a basic-block live-in), and a call instruction
280 /// (it's an implicit-def of the BL).
281 virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
282
283private:
284 const RISCVSubtarget &Subtarget;
285};
286
287struct RISCVFormalArgHandler : public RISCVIncomingValueHandler {
288 RISCVFormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI)
289 : RISCVIncomingValueHandler(B, MRI) {}
290
291 void markPhysRegUsed(MCRegister PhysReg) override {
292 MIRBuilder.getMRI()->addLiveIn(Reg: PhysReg);
293 MIRBuilder.getMBB().addLiveIn(PhysReg);
294 }
295};
296
297struct RISCVCallReturnHandler : public RISCVIncomingValueHandler {
298 RISCVCallReturnHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
299 MachineInstrBuilder &MIB)
300 : RISCVIncomingValueHandler(B, MRI), MIB(MIB) {}
301
302 void markPhysRegUsed(MCRegister PhysReg) override {
303 MIB.addDef(RegNo: PhysReg, Flags: RegState::Implicit);
304 }
305
306 MachineInstrBuilder MIB;
307};
308
309} // namespace
310
// Construct the RISC-V call-lowering implementation, handing the target
// lowering info to the generic CallLowering base class.
RISCVCallLowering::RISCVCallLowering(const RISCVTargetLowering &TLI)
    : CallLowering(&TLI) {}
313
314/// Return true if scalable vector with ScalarTy is legal for lowering.
315static bool isLegalElementTypeForRVV(Type *EltTy,
316 const RISCVSubtarget &Subtarget) {
317 if (EltTy->isPointerTy())
318 return Subtarget.is64Bit() ? Subtarget.hasVInstructionsI64() : true;
319 if (EltTy->isIntegerTy(Bitwidth: 1) || EltTy->isIntegerTy(Bitwidth: 8) ||
320 EltTy->isIntegerTy(Bitwidth: 16) || EltTy->isIntegerTy(Bitwidth: 32))
321 return true;
322 if (EltTy->isIntegerTy(Bitwidth: 64))
323 return Subtarget.hasVInstructionsI64();
324 if (EltTy->isHalfTy())
325 return Subtarget.hasVInstructionsF16();
326 if (EltTy->isBFloatTy())
327 return Subtarget.hasVInstructionsBF16();
328 if (EltTy->isFloatTy())
329 return Subtarget.hasVInstructionsF32();
330 if (EltTy->isDoubleTy())
331 return Subtarget.hasVInstructionsF64();
332 return false;
333}
334
335// TODO: Support all argument types.
336// TODO: Remove IsLowerArgs argument by adding support for vectors in lowerCall.
337static bool isSupportedArgumentType(Type *T, const RISCVSubtarget &Subtarget,
338 bool IsLowerArgs = false) {
339 // TODO: Integers larger than 2*XLen are passed indirectly which is not
340 // supported yet.
341 if (T->isIntegerTy())
342 return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
343 if (T->isFloatTy() || T->isDoubleTy())
344 return true;
345 if (T->isPointerTy())
346 return true;
347 // TODO: Support fixed vector types.
348 if (IsLowerArgs && T->isVectorTy() && Subtarget.hasVInstructions() &&
349 T->isScalableTy() &&
350 isLegalElementTypeForRVV(EltTy: T->getScalarType(), Subtarget))
351 return true;
352 return false;
353}
354
355// TODO: Only integer, pointer and aggregate types are supported now.
356// TODO: Remove IsLowerRetVal argument by adding support for vectors in
357// lowerCall.
358static bool isSupportedReturnType(Type *T, const RISCVSubtarget &Subtarget,
359 bool IsLowerRetVal = false) {
360 // TODO: Integers larger than 2*XLen are passed indirectly which is not
361 // supported yet.
362 if (T->isIntegerTy())
363 return T->getIntegerBitWidth() <= Subtarget.getXLen() * 2;
364 if (T->isFloatTy() || T->isDoubleTy())
365 return true;
366 if (T->isPointerTy())
367 return true;
368
369 if (T->isArrayTy())
370 return isSupportedReturnType(T: T->getArrayElementType(), Subtarget);
371
372 if (T->isStructTy()) {
373 auto StructT = cast<StructType>(Val: T);
374 for (unsigned i = 0, e = StructT->getNumElements(); i != e; ++i)
375 if (!isSupportedReturnType(T: StructT->getElementType(N: i), Subtarget))
376 return false;
377 return true;
378 }
379
380 if (IsLowerRetVal && T->isVectorTy() && Subtarget.hasVInstructions() &&
381 T->isScalableTy() &&
382 isLegalElementTypeForRVV(EltTy: T->getScalarType(), Subtarget))
383 return true;
384
385 return false;
386}
387
388bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
389 const Value *Val,
390 ArrayRef<Register> VRegs,
391 MachineInstrBuilder &Ret) const {
392 if (!Val)
393 return true;
394
395 const RISCVSubtarget &Subtarget =
396 MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
397 if (!isSupportedReturnType(T: Val->getType(), Subtarget, /*IsLowerRetVal=*/true))
398 return false;
399
400 MachineFunction &MF = MIRBuilder.getMF();
401 const DataLayout &DL = MF.getDataLayout();
402 const Function &F = MF.getFunction();
403 CallingConv::ID CC = F.getCallingConv();
404
405 ArgInfo OrigRetInfo(VRegs, Val->getType(), 0);
406 setArgFlags(Arg&: OrigRetInfo, OpIdx: AttributeList::ReturnIndex, DL, FuncInfo: F);
407
408 SmallVector<ArgInfo, 4> SplitRetInfos;
409 splitToValueTypes(OrigArgInfo: OrigRetInfo, SplitArgs&: SplitRetInfos, DL, CallConv: CC);
410
411 RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
412 ArrayRef(F.getReturnType())};
413 RISCVOutgoingValueAssigner Assigner(
414 CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
415 /*IsRet=*/true, Dispatcher);
416 RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
417 return determineAndHandleAssignments(Handler, Assigner, Args&: SplitRetInfos,
418 MIRBuilder, CallConv: CC, IsVarArg: F.isVarArg());
419}
420
421bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
422 const Value *Val, ArrayRef<Register> VRegs,
423 FunctionLoweringInfo &FLI) const {
424 assert(!Val == VRegs.empty() && "Return value without a vreg");
425 MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(RISCV::Opcode: PseudoRET);
426
427 if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
428 return false;
429
430 MIRBuilder.insertInstr(MIB: Ret);
431 return true;
432}
433
434/// If there are varargs that were passed in a0-a7, the data in those registers
435/// must be copied to the varargs save area on the stack.
436void RISCVCallLowering::saveVarArgRegisters(
437 MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
438 IncomingValueAssigner &Assigner, CCState &CCInfo) const {
439 MachineFunction &MF = MIRBuilder.getMF();
440 const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
441 unsigned XLenInBytes = Subtarget.getXLen() / 8;
442 ArrayRef<MCPhysReg> ArgRegs = RISCV::getArgGPRs(ABI: Subtarget.getTargetABI());
443 MachineRegisterInfo &MRI = MF.getRegInfo();
444 unsigned Idx = CCInfo.getFirstUnallocated(Regs: ArgRegs);
445 MachineFrameInfo &MFI = MF.getFrameInfo();
446 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
447
448 // Size of the vararg save area. For now, the varargs save area is either
449 // zero or large enough to hold a0-a7.
450 int VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
451 int FI;
452
453 // If all registers are allocated, then all varargs must be passed on the
454 // stack and we don't need to save any argregs.
455 if (VarArgsSaveSize == 0) {
456 int VaArgOffset = Assigner.StackSize;
457 FI = MFI.CreateFixedObject(Size: XLenInBytes, SPOffset: VaArgOffset, IsImmutable: true);
458 } else {
459 int VaArgOffset = -VarArgsSaveSize;
460 FI = MFI.CreateFixedObject(Size: VarArgsSaveSize, SPOffset: VaArgOffset, IsImmutable: true);
461
462 // If saving an odd number of registers then create an extra stack slot to
463 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
464 // offsets to even-numbered registered remain 2*XLEN-aligned.
465 if (Idx % 2) {
466 MFI.CreateFixedObject(Size: XLenInBytes,
467 SPOffset: VaArgOffset - static_cast<int>(XLenInBytes), IsImmutable: true);
468 VarArgsSaveSize += XLenInBytes;
469 }
470
471 const LLT p0 = LLT::pointer(AddressSpace: MF.getDataLayout().getAllocaAddrSpace(),
472 SizeInBits: Subtarget.getXLen());
473 const LLT sXLen = LLT::scalar(SizeInBits: Subtarget.getXLen());
474
475 auto FIN = MIRBuilder.buildFrameIndex(Res: p0, Idx: FI);
476 auto Offset = MIRBuilder.buildConstant(
477 Res: MRI.createGenericVirtualRegister(Ty: sXLen), Val: XLenInBytes);
478
479 // Copy the integer registers that may have been used for passing varargs
480 // to the vararg save area.
481 const MVT XLenVT = Subtarget.getXLenVT();
482 for (unsigned I = Idx; I < ArgRegs.size(); ++I) {
483 const Register VReg = MRI.createGenericVirtualRegister(Ty: sXLen);
484 Handler.assignValueToReg(
485 ValVReg: VReg, PhysReg: ArgRegs[I],
486 VA: CCValAssign::getReg(ValNo: I + MF.getFunction().getNumOperands(), ValVT: XLenVT,
487 RegNo: ArgRegs[I], LocVT: XLenVT, HTP: CCValAssign::Full));
488 auto MPO =
489 MachinePointerInfo::getFixedStack(MF, FI, Offset: (I - Idx) * XLenInBytes);
490 MIRBuilder.buildStore(Val: VReg, Addr: FIN, PtrInfo: MPO, Alignment: inferAlignFromPtrInfo(MF, MPO));
491 FIN = MIRBuilder.buildPtrAdd(Res: MRI.createGenericVirtualRegister(Ty: p0),
492 Op0: FIN.getReg(Idx: 0), Op1: Offset);
493 }
494 }
495
496 // Record the frame index of the first variable argument which is a value
497 // necessary to G_VASTART.
498 RVFI->setVarArgsFrameIndex(FI);
499 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
500}
501
502bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
503 const Function &F,
504 ArrayRef<ArrayRef<Register>> VRegs,
505 FunctionLoweringInfo &FLI) const {
506 // Early exit if there are no arguments. varargs are not part of F.args() but
507 // must be lowered.
508 if (F.arg_empty() && !F.isVarArg())
509 return true;
510
511 const RISCVSubtarget &Subtarget =
512 MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
513 for (auto &Arg : F.args()) {
514 if (!isSupportedArgumentType(T: Arg.getType(), Subtarget,
515 /*IsLowerArgs=*/true))
516 return false;
517 }
518
519 MachineFunction &MF = MIRBuilder.getMF();
520 const DataLayout &DL = MF.getDataLayout();
521 CallingConv::ID CC = F.getCallingConv();
522
523 SmallVector<ArgInfo, 32> SplitArgInfos;
524 SmallVector<Type *, 4> TypeList;
525 unsigned Index = 0;
526 for (auto &Arg : F.args()) {
527 // Construct the ArgInfo object from destination register and argument type.
528 ArgInfo AInfo(VRegs[Index], Arg.getType(), Index);
529 setArgFlags(Arg&: AInfo, OpIdx: Index + AttributeList::FirstArgIndex, DL, FuncInfo: F);
530
531 // Handle any required merging from split value types from physical
532 // registers into the desired VReg. ArgInfo objects are constructed
533 // correspondingly and appended to SplitArgInfos.
534 splitToValueTypes(OrigArgInfo: AInfo, SplitArgs&: SplitArgInfos, DL, CallConv: CC);
535
536 TypeList.push_back(Elt: Arg.getType());
537
538 ++Index;
539 }
540
541 RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
542 ArrayRef(TypeList)};
543 RISCVIncomingValueAssigner Assigner(
544 CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
545 /*IsRet=*/false, Dispatcher);
546 RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
547
548 SmallVector<CCValAssign, 16> ArgLocs;
549 CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
550 if (!determineAssignments(Assigner, Args&: SplitArgInfos, CCInfo) ||
551 !handleAssignments(Handler, Args&: SplitArgInfos, CCState&: CCInfo, ArgLocs, MIRBuilder))
552 return false;
553
554 if (F.isVarArg())
555 saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);
556
557 return true;
558}
559
560bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
561 CallLoweringInfo &Info) const {
562 MachineFunction &MF = MIRBuilder.getMF();
563 const DataLayout &DL = MF.getDataLayout();
564 const Function &F = MF.getFunction();
565 CallingConv::ID CC = F.getCallingConv();
566
567 const RISCVSubtarget &Subtarget =
568 MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
569 for (auto &AInfo : Info.OrigArgs) {
570 if (!isSupportedArgumentType(T: AInfo.Ty, Subtarget))
571 return false;
572 }
573
574 if (!Info.OrigRet.Ty->isVoidTy() &&
575 !isSupportedReturnType(T: Info.OrigRet.Ty, Subtarget))
576 return false;
577
578 MachineInstrBuilder CallSeqStart =
579 MIRBuilder.buildInstr(RISCV::ADJCALLSTACKDOWN);
580
581 SmallVector<ArgInfo, 32> SplitArgInfos;
582 SmallVector<ISD::OutputArg, 8> Outs;
583 SmallVector<Type *, 4> TypeList;
584 for (auto &AInfo : Info.OrigArgs) {
585 // Handle any required unmerging of split value types from a given VReg into
586 // physical registers. ArgInfo objects are constructed correspondingly and
587 // appended to SplitArgInfos.
588 splitToValueTypes(OrigArgInfo: AInfo, SplitArgs&: SplitArgInfos, DL, CallConv: CC);
589 TypeList.push_back(Elt: AInfo.Ty);
590 }
591
592 // TODO: Support tail calls.
593 Info.IsTailCall = false;
594
595 // Select the recommended relocation type R_RISCV_CALL_PLT.
596 if (!Info.Callee.isReg())
597 Info.Callee.setTargetFlags(RISCVII::MO_CALL);
598
599 MachineInstrBuilder Call =
600 MIRBuilder
601 .buildInstrNoInsert(Opcode: Info.Callee.isReg() ? RISCV::PseudoCALLIndirect
602 : RISCV::PseudoCALL)
603 .add(Info.Callee);
604 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
605 Call.addRegMask(Mask: TRI->getCallPreservedMask(MF, Info.CallConv));
606
607 RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(),
608 ArrayRef(TypeList)};
609 RISCVOutgoingValueAssigner ArgAssigner(
610 CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
611 /*IsRet=*/false, ArgDispatcher);
612 RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
613 if (!determineAndHandleAssignments(Handler&: ArgHandler, Assigner&: ArgAssigner, Args&: SplitArgInfos,
614 MIRBuilder, CallConv: CC, IsVarArg: Info.IsVarArg))
615 return false;
616
617 MIRBuilder.insertInstr(MIB: Call);
618
619 CallSeqStart.addImm(Val: ArgAssigner.StackSize).addImm(Val: 0);
620 MIRBuilder.buildInstr(RISCV::ADJCALLSTACKUP)
621 .addImm(ArgAssigner.StackSize)
622 .addImm(0);
623
624 // If Callee is a reg, since it is used by a target specific
625 // instruction, it must have a register class matching the
626 // constraint of that instruction.
627 if (Call->getOperand(i: 0).isReg())
628 constrainOperandRegClass(MF, *TRI, MF.getRegInfo(),
629 *Subtarget.getInstrInfo(),
630 *Subtarget.getRegBankInfo(), *Call,
631 Call->getDesc(), Call->getOperand(i: 0), 0);
632
633 if (Info.OrigRet.Ty->isVoidTy())
634 return true;
635
636 SmallVector<ArgInfo, 4> SplitRetInfos;
637 splitToValueTypes(OrigArgInfo: Info.OrigRet, SplitArgs&: SplitRetInfos, DL, CallConv: CC);
638
639 RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
640 ArrayRef(F.getReturnType())};
641 RISCVIncomingValueAssigner RetAssigner(
642 CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
643 /*IsRet=*/true, RetDispatcher);
644 RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
645 if (!determineAndHandleAssignments(Handler&: RetHandler, Assigner&: RetAssigner, Args&: SplitRetInfos,
646 MIRBuilder, CallConv: CC, IsVarArg: Info.IsVarArg))
647 return false;
648
649 return true;
650}
651

// Source: llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp