//===- MipsSEFrameLowering.cpp - Mips32/64 Frame Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of the TargetFrameLowering
// class.
//
//===----------------------------------------------------------------------===//

#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsABIInfo.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

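// Return the {MFHI, MFLO} opcode pair used to read the halves of the
// accumulator register Src, or {0, 0} if Src is not an accumulator register.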
static std::pair<unsigned, unsigned> getMFHiLoOpc(unsigned Src) {
  if (Mips::ACC64RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI,
                          (unsigned)Mips::PseudoMFLO);

  if (Mips::ACC64DSPRegClass.contains(Src))
    return std::make_pair((unsigned)Mips::MFHI_DSP, (unsigned)Mips::MFLO_DSP);

  if (Mips::ACC128RegClass.contains(Src))
    return std::make_pair((unsigned)Mips::PseudoMFHI64,
                          (unsigned)Mips::PseudoMFLO64);

  return std::make_pair(0, 0);
}

namespace {

/// Helper class to expand pseudos.
class ExpandPseudo {
public:
  ExpandPseudo(MachineFunction &MF);
  bool expand();

private:
  using Iter = MachineBasicBlock::iterator;

  bool expandInstr(MachineBasicBlock &MBB, Iter I);
  void expandLoadCCond(MachineBasicBlock &MBB, Iter I);
  void expandStoreCCond(MachineBasicBlock &MBB, Iter I);
  void expandLoadACC(MachineBasicBlock &MBB, Iter I, unsigned RegSize);
  void expandStoreACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                      unsigned MFLoOpc, unsigned RegSize);
  bool expandCopy(MachineBasicBlock &MBB, Iter I);
  bool expandCopyACC(MachineBasicBlock &MBB, Iter I, unsigned MFHiOpc,
                     unsigned MFLoOpc);
  bool expandBuildPairF64(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator I, bool FP64) const;
  bool expandExtractElementF64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, bool FP64) const;

  MachineFunction &MF;
  MachineRegisterInfo &MRI;
  const MipsSubtarget &Subtarget;
  const MipsSEInstrInfo &TII;
  const MipsRegisterInfo &RegInfo;
};

} // end anonymous namespace

ExpandPseudo::ExpandPseudo(MachineFunction &MF_)
    : MF(MF_), MRI(MF.getRegInfo()),
      Subtarget(MF.getSubtarget<MipsSubtarget>()),
      TII(*static_cast<const MipsSEInstrInfo *>(Subtarget.getInstrInfo())),
      RegInfo(*Subtarget.getRegisterInfo()) {}

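// Expand the accumulator and FPU pseudo instructions handled below in every
// basic block. Returns true if anything was rewritten, so that the caller can
// reserve an emergency spill slot for the register scavenger.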
bool ExpandPseudo::expand() {
  bool Expanded = false;

  for (auto &MBB : MF) {
    for (Iter I = MBB.begin(), End = MBB.end(); I != End;)
      Expanded |= expandInstr(MBB, I++);
  }

  return Expanded;
}

bool ExpandPseudo::expandInstr(MachineBasicBlock &MBB, Iter I) {
  switch(I->getOpcode()) {
  case Mips::LOAD_CCOND_DSP:
    expandLoadCCond(MBB, I);
    break;
  case Mips::STORE_CCOND_DSP:
    expandStoreCCond(MBB, I);
    break;
  case Mips::LOAD_ACC64:
  case Mips::LOAD_ACC64DSP:
    expandLoadACC(MBB, I, 4);
    break;
  case Mips::LOAD_ACC128:
    expandLoadACC(MBB, I, 8);
    break;
  case Mips::STORE_ACC64:
    expandStoreACC(MBB, I, Mips::PseudoMFHI, Mips::PseudoMFLO, 4);
    break;
  case Mips::STORE_ACC64DSP:
    expandStoreACC(MBB, I, Mips::MFHI_DSP, Mips::MFLO_DSP, 4);
    break;
  case Mips::STORE_ACC128:
    expandStoreACC(MBB, I, Mips::PseudoMFHI64, Mips::PseudoMFLO64, 8);
    break;
  case Mips::BuildPairF64:
    if (expandBuildPairF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::BuildPairF64_64:
    if (expandBuildPairF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64:
    if (expandExtractElementF64(MBB, I, false))
      MBB.erase(I);
    return false;
  case Mips::ExtractElementF64_64:
    if (expandExtractElementF64(MBB, I, true))
      MBB.erase(I);
    return false;
  case TargetOpcode::COPY:
    if (!expandCopy(MBB, I))
      return false;
    break;
  default:
    return false;
  }

  MBB.erase(I);
  return true;
}

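// Expand LOAD_CCOND_DSP: reload the spilled value into a scratch GPR, then
// copy it into the DSP condition-code register.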
void ExpandPseudo::expandLoadCCond(MachineBasicBlock &MBB, Iter I) {
  // load $vr, FI
  // copy ccond, $vr

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  TII.loadRegFromStack(MBB, I, VR, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), Dst)
      .addReg(VR, RegState::Kill);
}

void ExpandPseudo::expandStoreCCond(MachineBasicBlock &MBB, Iter I) {
  // copy $vr, ccond
  // store $vr, FI

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(4);
  Register VR = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();

  BuildMI(MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), VR)
      .addReg(Src, getKillRegState(I->getOperand(0).isKill()));
  TII.storeRegToStack(MBB, I, VR, true, FI, RC, &RegInfo, 0);
}

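// Reload an accumulator from its frame slot: each RegSize-byte half is loaded
// into a scratch GPR and copied into the LO and HI subregisters of the
// destination accumulator.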
void ExpandPseudo::expandLoadACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned RegSize) {
  // load $vr0, FI
  // copy lo, $vr0
  // load $vr1, FI + 4
  // copy hi, $vr1

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Dst = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  Register Lo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register Hi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();
  const MCInstrDesc &Desc = TII.get(TargetOpcode::COPY);

  TII.loadRegFromStack(MBB, I, VR0, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, Desc, Lo).addReg(VR0, RegState::Kill);
  TII.loadRegFromStack(MBB, I, VR1, FI, RC, &RegInfo, RegSize);
  BuildMI(MBB, I, DL, Desc, Hi).addReg(VR1, RegState::Kill);
}

void ExpandPseudo::expandStoreACC(MachineBasicBlock &MBB, Iter I,
                                  unsigned MFHiOpc, unsigned MFLoOpc,
                                  unsigned RegSize) {
  // mflo $vr0, src
  // store $vr0, FI
  // mfhi $vr1, src
  // store $vr1, FI + 4

  assert(I->getOperand(0).isReg() && I->getOperand(1).isFI());

  const TargetRegisterClass *RC = RegInfo.intRegClass(RegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  Register Src = I->getOperand(0).getReg(), FI = I->getOperand(1).getIndex();
  unsigned SrcKill = getKillRegState(I->getOperand(0).isKill());
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  TII.storeRegToStack(MBB, I, VR0, true, FI, RC, &RegInfo, 0);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  TII.storeRegToStack(MBB, I, VR1, true, FI, RC, &RegInfo, RegSize);
}

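// Expand a COPY whose source is an accumulator register into an MFHI/MFLO
// pair; copies from any other register class are left untouched.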
bool ExpandPseudo::expandCopy(MachineBasicBlock &MBB, Iter I) {
  Register Src = I->getOperand(1).getReg();
  std::pair<unsigned, unsigned> Opcodes = getMFHiLoOpc(Src);

  if (!Opcodes.first)
    return false;

  return expandCopyACC(MBB, I, Opcodes.first, Opcodes.second);
}

bool ExpandPseudo::expandCopyACC(MachineBasicBlock &MBB, Iter I,
                                 unsigned MFHiOpc, unsigned MFLoOpc) {
  // mflo $vr0, src
  // copy dst_lo, $vr0
  // mfhi $vr1, src
  // copy dst_hi, $vr1

  unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
  const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
  const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
  Register VR0 = MRI.createVirtualRegister(RC);
  Register VR1 = MRI.createVirtualRegister(RC);
  unsigned SrcKill = getKillRegState(I->getOperand(1).isKill());
  Register DstLo = RegInfo.getSubReg(Dst, Mips::sub_lo);
  Register DstHi = RegInfo.getSubReg(Dst, Mips::sub_hi);
  DebugLoc DL = I->getDebugLoc();

  BuildMI(MBB, I, DL, TII.get(MFLoOpc), VR0).addReg(Src);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstLo)
      .addReg(VR0, RegState::Kill);
  BuildMI(MBB, I, DL, TII.get(MFHiOpc), VR1).addReg(Src, SrcKill);
  BuildMI(MBB, I, DL, TII.get(TargetOpcode::COPY), DstHi)
      .addReg(VR1, RegState::Kill);
  return true;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandBuildPairF64 does, for the case when the ABI is fpxx and mthc1 is
/// not available and the case where the ABI is FP64A. It is implemented here
/// because frame indexes are eliminated before MipsSEInstrInfo::
/// expandBuildPairF64 is called.
bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      bool FP64) const {
  // For fpxx and when mthc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmtc1 is available doesn't need to be handled here
  // because it never creates a BuildPairF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mtc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether they live in an odd or
  // even register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register LoReg = I->getOperand(1).getReg();
    Register HiReg = I->getOperand(2).getReg();

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mthc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC = &Mips::GPR32RegClass;
    const TargetRegisterClass *RC2 =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC2);
    if (!Subtarget.isLittle())
      std::swap(LoReg, HiReg);
    TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC,
                        &RegInfo, 0);
    TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC,
                        &RegInfo, 4);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, 0);
    return true;
  }

  return false;
}

/// This method expands the same instruction that MipsSEInstrInfo::
/// expandExtractElementF64 does, for the case when the ABI is fpxx and mfhc1
/// is not available and the case where the ABI is FP64A. It is implemented
/// here because frame indexes are eliminated before MipsSEInstrInfo::
/// expandExtractElementF64 is called.
bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           bool FP64) const {
  const MachineOperand &Op1 = I->getOperand(1);
  const MachineOperand &Op2 = I->getOperand(2);

  if ((Op1.isReg() && Op1.isUndef()) || (Op2.isReg() && Op2.isUndef())) {
    Register DstReg = I->getOperand(0).getReg();
    BuildMI(MBB, I, I->getDebugLoc(), TII.get(Mips::IMPLICIT_DEF), DstReg);
    return true;
  }

  // For fpxx and when mfhc1 is not available, use:
  //   spill + reload via ldc1
  //
  // The case where dmfc1 is available doesn't need to be handled here
  // because it never creates an ExtractElementF64 node.
  //
  // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
  // for odd-numbered double precision values (because the lower 32 bits are
  // transferred with mfc1, which is redirected to the upper half of the even
  // register). Unfortunately, we have to make this decision before register
  // allocation, so for now we use a spill/reload sequence for all
  // double-precision values, regardless of whether they live in an odd or
  // even register.
  //
  // For the cases that should be covered here, MipsSEISelDAGToDAG adds $sp as
  // an implicit operand, so other passes (like ShrinkWrapping) are aware that
  // the stack is used.
  if (I->getNumOperands() == 4 && I->getOperand(3).isReg()
      && I->getOperand(3).getReg() == Mips::SP) {
    Register DstReg = I->getOperand(0).getReg();
    Register SrcReg = Op1.getReg();
    unsigned N = Op2.getImm();
    int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));

    // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
    // the cases where mfhc1 is not available). 64-bit architectures and
    // MIPS32r2 or later can use FGR64 though.
    assert(Subtarget.isGP64bit() || Subtarget.hasMTHC1() ||
           !Subtarget.isFP64bit());

    const TargetRegisterClass *RC =
        FP64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
    const TargetRegisterClass *RC2 = &Mips::GPR32RegClass;

    // We re-use the same spill slot each time so that the stack frame doesn't
    // grow too much in functions with a large number of moves.
    int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(MF, RC);
    TII.storeRegToStack(MBB, I, SrcReg, Op1.isKill(), FI, RC, &RegInfo, 0);
    TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &RegInfo, Offset);
    return true;
  }

  return false;
}

MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget &STI)
    : MipsFrameLowering(STI, STI.getStackAlignment()) {}

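// Emit the prologue: allocate the stack frame, emit CFI directives for the
// callee-saved spills, spill the EH data registers if the function calls
// eh_return, and establish the frame/base pointers when they are required.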
void MipsSEFrameLowering::emitPrologue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc dl;
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();
  unsigned ADDiu = ABI.GetPtrAddiuOp();
  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;

  const TargetRegisterClass *RC =
      ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

  // First, compute final stack size.
  uint64_t StackSize = MFI.getStackSize();

  // No need to allocate space on the stack.
  if (StackSize == 0 && !MFI.adjustsStack()) return;

  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Adjust stack.
  TII.adjustStackPtr(SP, -StackSize, MBB, MBBI);

  // emit ".cfi_def_cfa_offset StackSize"
  unsigned CFIIndex =
      MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize));
  BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptPrologueStub(MF, MBB);

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  if (!CSI.empty()) {
    // Find the instruction past the last instruction that saves a
    // callee-saved register to the stack.
    for (unsigned i = 0; i < CSI.size(); ++i)
      ++MBBI;

    // Iterate over the list of callee-saved registers and emit .cfi_offset
    // directives.
    for (const CalleeSavedInfo &I : CSI) {
      int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
      Register Reg = I.getReg();

      // If Reg is a double precision register, emit two cfa_offsets,
      // one for each of the paired single precision registers.
      if (Mips::AFGR64RegClass.contains(Reg)) {
        unsigned Reg0 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_lo), true);
        unsigned Reg1 =
            MRI->getDwarfRegNum(RegInfo.getSubReg(Reg, Mips::sub_hi), true);

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else if (Mips::FGR64RegClass.contains(Reg)) {
        unsigned Reg0 = MRI->getDwarfRegNum(Reg, true);
        unsigned Reg1 = MRI->getDwarfRegNum(Reg, true) + 1;

        if (!STI.isLittle())
          std::swap(Reg0, Reg1);

        unsigned CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg0, Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);

        CFIIndex = MF.addFrameInst(
            MCCFIInstruction::createOffset(nullptr, Reg1, Offset + 4));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      } else {
        // Reg is either in GPR32 or FGR32.
        unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
            nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
        BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
            .addCFIIndex(CFIIndex);
      }
    }
  }

  if (MipsFI->callsEhReturn()) {
    // Insert instructions that spill eh data registers.
    for (int I = 0; I < 4; ++I) {
      if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
        MBB.addLiveIn(ABI.GetEhDataReg(I));
      TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
                              MipsFI->getEhDataRegFI(I), RC, &RegInfo,
                              Register());
    }

    // Emit .cfi_offset directives for eh data registers.
    for (int I = 0; I < 4; ++I) {
      int64_t Offset = MFI.getObjectOffset(MipsFI->getEhDataRegFI(I));
      unsigned Reg = MRI->getDwarfRegNum(ABI.GetEhDataReg(I), true);
      unsigned CFIIndex = MF.addFrameInst(
          MCCFIInstruction::createOffset(nullptr, Reg, Offset));
      BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }
  }

  // If the frame pointer is enabled, set it to point to the stack pointer.
  if (hasFP(MF)) {
    // Insert instruction "move $fp, $sp" at this location.
    BuildMI(MBB, MBBI, dl, TII.get(MOVE), FP).addReg(SP).addReg(ZERO)
        .setMIFlag(MachineInstr::FrameSetup);

    // emit ".cfi_def_cfa_register $fp"
    unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(
        nullptr, MRI->getDwarfRegNum(FP, true)));
    BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);

    if (RegInfo.hasStackRealignment(MF)) {
      // addiu $Reg, $zero, -MaxAlignment
      // andi $sp, $sp, $Reg
      Register VR = MF.getRegInfo().createVirtualRegister(RC);
      assert((Log2(MFI.getMaxAlign()) < 16) &&
             "Function's alignment size requirement is not supported.");
      int64_t MaxAlign = -(int64_t)MFI.getMaxAlign().value();

      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO).addImm(MaxAlign);
      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);

      if (hasBP(MF)) {
        // move $s7, $sp
        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
        BuildMI(MBB, MBBI, dl, TII.get(MOVE), BP)
            .addReg(SP)
            .addReg(ZERO);
      }
    }
  }
}

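// Extra prologue for functions with the "interrupt" attribute: read Cause
// into $k0 (EIC only), save EPC and Status to their spill slots via $k1, mask
// lower-priority interrupts, clear the KSU/ERL/EXL fields and (for hard-float
// targets) the CU1 bit of Status, then write the new Status back.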
void MipsSEFrameLowering::emitInterruptPrologueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Report an error if the target doesn't support Mips32r2 or later.
  // The epilogue relies on the use of "ehb" to clear execution
  // hazards. Pre-R2 Mips relies on an implementation-defined number
  // of "ssnop"s to clear the execution hazard. Support for ssnop hazard
  // clearing is not provided, so reject that configuration.
  if (!STI.hasMips32r2())
    report_fatal_error(
        "\"interrupt\" attribute is not supported on pre-MIPS32R2 or "
        "MIPS16 targets.");

  // The GP register contains the "user" value, so we cannot perform
  // any gp relative loads until we restore the "kernel" or "system" gp
  // value. Until support is written we shall only accept the static
  // relocation model.
  if ((STI.getRelocationModel() != Reloc::Static))
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "static relocation model on MIPS at the present time.");

  if (!STI.isABI_O32() || STI.hasMips64())
    report_fatal_error("\"interrupt\" attribute is only supported for the "
                       "O32 ABI on MIPS32R2+ at the present time.");

  // Perform ISR handling like GCC.
  StringRef IntKind =
      MF.getFunction().getFnAttribute("interrupt").getValueAsString();
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // EIC interrupt handling needs to read the Cause register to disable
  // interrupts.
  if (IntKind == "eic") {
    // Coprocessor registers are always live per se.
    MBB.addLiveIn(Mips::COP013);
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K0)
        .addReg(Mips::COP013)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);

    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EXT), Mips::K0)
        .addReg(Mips::K0)
        .addImm(10)
        .addImm(6)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Fetch and spill EPC.
  MBB.addLiveIn(Mips::COP014);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP014)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(0), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Fetch and spill Status.
  MBB.addLiveIn(Mips::COP012);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MFC0), Mips::K1)
      .addReg(Mips::COP012)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  STI.getInstrInfo()->storeRegToStack(MBB, MBBI, Mips::K1, false,
                                      MipsFI->getISRRegFI(1), PtrRC,
                                      STI.getRegisterInfo(), 0);

  // Build the configuration for disabling lower priority interrupts. Non-EIC
  // interrupts need to be masked off with zero, EIC from the Cause register.
  unsigned InsPosition = 8;
  unsigned InsSize = 0;
  unsigned SrcReg = Mips::ZERO;

  // If the interrupt we're tied to is the EIC, switch the source for the
  // masking off interrupts to the Cause register.
  if (IntKind == "eic") {
    SrcReg = Mips::K0;
    InsPosition = 10;
    InsSize = 6;
  } else
    InsSize = StringSwitch<unsigned>(IntKind)
                  .Case("sw0", 1)
                  .Case("sw1", 2)
                  .Case("hw0", 3)
                  .Case("hw1", 4)
                  .Case("hw2", 5)
                  .Case("hw3", 6)
                  .Case("hw4", 7)
                  .Case("hw5", 8)
                  .Default(0);
  assert(InsSize != 0 && "Unknown interrupt type!");

  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(SrcReg)
      .addImm(InsPosition)
      .addImm(InsSize)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Mask off KSU, ERL, EXL.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
      .addReg(Mips::ZERO)
      .addImm(1)
      .addImm(4)
      .addReg(Mips::K1)
      .setMIFlag(MachineInstr::FrameSetup);

  // Disable the FPU as we are not spilling those register sets.
  if (!STI.useSoftFloat())
    BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::INS), Mips::K1)
        .addReg(Mips::ZERO)
        .addImm(29)
        .addImm(1)
        .addReg(Mips::K1)
        .setMIFlag(MachineInstr::FrameSetup);

  // Set the new status.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);
}

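// Emit the epilogue: restore $sp from $fp when a frame pointer is in use,
// reload the EH data registers if the function calls eh_return, and
// deallocate the stack frame.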
void MipsSEFrameLowering::emitEpilogue(MachineFunction &MF,
                                       MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

  const MipsSEInstrInfo &TII =
      *static_cast<const MipsSEInstrInfo *>(STI.getInstrInfo());
  const MipsRegisterInfo &RegInfo =
      *static_cast<const MipsRegisterInfo *>(STI.getRegisterInfo());

  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  MipsABIInfo ABI = STI.getABI();
  unsigned SP = ABI.GetStackPtr();
  unsigned FP = ABI.GetFramePtr();
  unsigned ZERO = ABI.GetNullPtr();
  unsigned MOVE = ABI.GetGPRMoveOp();

  // If the frame pointer is enabled, restore the stack pointer from it.
  if (hasFP(MF)) {
    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;

    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instruction "move $sp, $fp" at this location.
    BuildMI(MBB, I, DL, TII.get(MOVE), SP).addReg(FP).addReg(ZERO);
  }

  if (MipsFI->callsEhReturn()) {
    const TargetRegisterClass *RC =
        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;

    // Find the first instruction that restores a callee-saved register.
    MachineBasicBlock::iterator I = MBBI;
    for (unsigned i = 0; i < MFI.getCalleeSavedInfo().size(); ++i)
      --I;

    // Insert instructions that restore eh data registers.
    for (int J = 0; J < 4; ++J) {
      TII.loadRegFromStackSlot(MBB, I, ABI.GetEhDataReg(J),
                               MipsFI->getEhDataRegFI(J), RC, &RegInfo,
                               Register());
    }
  }

  if (MF.getFunction().hasFnAttribute("interrupt"))
    emitInterruptEpilogueStub(MF, MBB);

  // Get the number of bytes from FrameInfo.
  uint64_t StackSize = MFI.getStackSize();

  if (!StackSize)
    return;

  // Adjust stack.
  TII.adjustStackPtr(SP, StackSize, MBB, MBBI);
}

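// Extra epilogue for "interrupt" functions: disable interrupts, clear
// execution hazards with ehb, then restore EPC and Status from their spill
// slots.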
void MipsSEFrameLowering::emitInterruptEpilogueStub(
    MachineFunction &MF, MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Perform ISR handling like GCC.
  const TargetRegisterClass *PtrRC = &Mips::GPR32RegClass;

  // Disable Interrupts.
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::DI), Mips::ZERO);
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::EHB));

  // Restore EPC.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(0), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP014)
      .addReg(Mips::K1)
      .addImm(0);

  // Restore Status.
  STI.getInstrInfo()->loadRegFromStackSlot(MBB, MBBI, Mips::K1,
                                           MipsFI->getISRRegFI(1), PtrRC,
                                           STI.getRegisterInfo(), Register());
  BuildMI(MBB, MBBI, DL, STI.getInstrInfo()->get(Mips::MTC0), Mips::COP012)
      .addReg(Mips::K1)
      .addImm(0);
}

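// Select the register used to address frame index FI: fixed objects are
// addressed relative to the frame pointer when one is present, other objects
// relative to the base pointer if it exists, and the stack pointer otherwise.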
StackOffset
MipsSEFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                            Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MipsABIInfo ABI = STI.getABI();

  if (MFI.isFixedObjectIndex(FI))
    FrameReg = hasFP(MF) ? ABI.GetFramePtr() : ABI.GetStackPtr();
  else
    FrameReg = hasBP(MF) ? ABI.GetBasePtr() : ABI.GetStackPtr();

  return StackOffset::getFixed(MFI.getObjectOffset(FI) + MFI.getStackSize() -
                               getOffsetOfLocalArea() +
                               MFI.getOffsetAdjustment());
}

bool MipsSEFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const TargetInstrInfo &TII = *STI.getInstrInfo();

  for (const CalleeSavedInfo &I : CSI) {
    // Add the callee-saved register as live-in. Do not add if the register is
    // RA and the return address is taken, because it has already been added
    // in method MipsTargetLowering::lowerRETURNADDR.
    // It's killed at the spill, unless the register is RA and the return
    // address is taken.
    Register Reg = I.getReg();
    bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA || Reg == Mips::RA_64)
        && MF->getFrameInfo().isReturnAddressTaken();
    if (!IsRAAndRetAddrIsTaken)
      MBB.addLiveIn(Reg);

    // ISRs require HI/LO to be spilled into kernel registers to be then
    // spilled to the stack frame.
    bool IsLOHI = (Reg == Mips::LO0 || Reg == Mips::LO0_64 ||
                   Reg == Mips::HI0 || Reg == Mips::HI0_64);
    const Function &Func = MBB.getParent()->getFunction();
    if (IsLOHI && Func.hasFnAttribute("interrupt")) {
      DebugLoc DL = MI->getDebugLoc();

      unsigned Op = 0;
      if (!STI.getABI().ArePtrs64bit()) {
        Op = (Reg == Mips::HI0) ? Mips::MFHI : Mips::MFLO;
        Reg = Mips::K0;
      } else {
        Op = (Reg == Mips::HI0) ? Mips::MFHI64 : Mips::MFLO64;
        Reg = Mips::K0_64;
      }
      BuildMI(MBB, MI, DL, TII.get(Op), Mips::K0)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Insert the spill to the stack frame.
    bool IsKill = !IsRAAndRetAddrIsTaken;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, IsKill, I.getFrameIdx(), RC, TRI,
                            Register());
  }

  return true;
}

bool
MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Reserve the call frame if the size of the maximum call frame fits into a
  // 16-bit immediate field and there are no variable sized objects on the
  // stack. Make sure the second register scavenger spill slot can be accessed
  // with one instruction.
  return isInt<16>(MFI.getMaxCallFrameSize() + getStackAlignment()) &&
         !MFI.hasVarSizedObjects();
}

/// Mark \p Reg and all registers aliasing it in the bitset.
static void setAliasRegs(MachineFunction &MF, BitVector &SavedRegs,
                         unsigned Reg) {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
    SavedRegs.set(*AI);
}

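// In addition to the default callee-saved computation: reserve $ra/$fp when a
// frame pointer is used and $s7 when a base pointer is used, create spill
// slots for the EH data and ISR registers when needed, and reserve emergency
// spill slots for the register scavenger when accumulator pseudos were
// expanded or the estimated frame offsets may not fit in a load/store
// immediate.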
void MipsSEFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                               BitVector &SavedRegs,
                                               RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  MipsABIInfo ABI = STI.getABI();
  unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
  unsigned FP = ABI.GetFramePtr();
  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;

  // Mark $ra and $fp as used if the function has a dedicated frame pointer.
  if (hasFP(MF)) {
    setAliasRegs(MF, SavedRegs, RA);
    setAliasRegs(MF, SavedRegs, FP);
  }
  // Mark $s7 as used if the function has a dedicated base pointer.
  if (hasBP(MF))
    setAliasRegs(MF, SavedRegs, BP);

  // Create spill slots for eh data registers if the function calls eh_return.
  if (MipsFI->callsEhReturn())
    MipsFI->createEhDataRegsFI(MF);

  // Create spill slots for Coprocessor 0 registers if the function is an ISR.
  if (MipsFI->isISR())
    MipsFI->createISRRegFI(MF);

  // Expand pseudo instructions which load, store or copy accumulators.
  // Add an emergency spill slot if a pseudo was expanded.
  if (ExpandPseudo(MF).expand()) {
    // The spill slot should be half the size of the accumulator. If the
    // target has 64-bit wide general-purpose registers, it should be 64-bit,
    // otherwise it should be 32-bit.
    const TargetRegisterClass &RC =
        STI.isGP64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
    int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                                 TRI->getSpillAlign(RC),
                                                 false);
    RS->addScavengingFrameIndex(FI);
  }

  // Set the scavenging frame index if necessary.
  uint64_t MaxSPOffset = estimateStackSize(MF);

  // MSA has a minimum offset of 10 bits signed. If there is a variable
  // sized object on the stack, the estimation cannot account for it.
  if (isIntN(STI.hasMSA() ? 10 : 16, MaxSPOffset) &&
      !MF.getFrameInfo().hasVarSizedObjects())
    return;

  const TargetRegisterClass &RC =
      ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
  int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
                                               TRI->getSpillAlign(RC), false);
  RS->addScavengingFrameIndex(FI);
}

const MipsFrameLowering *
llvm::createMipsSEFrameLowering(const MipsSubtarget &ST) {
  return new MipsSEFrameLowering(ST);
}