1 | //===-- Thumb1InstrInfo.cpp - Thumb-1 Instruction Information -------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the Thumb-1 implementation of the TargetInstrInfo class. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "Thumb1InstrInfo.h" |
14 | #include "ARMSubtarget.h" |
15 | #include "llvm/ADT/BitVector.h" |
16 | #include "llvm/CodeGen/LiveRegUnits.h" |
17 | #include "llvm/CodeGen/MachineFrameInfo.h" |
18 | #include "llvm/CodeGen/MachineInstrBuilder.h" |
19 | #include "llvm/CodeGen/MachineMemOperand.h" |
20 | #include "llvm/MC/MCInst.h" |
21 | #include "llvm/MC/MCInstBuilder.h" |
22 | |
23 | using namespace llvm; |
24 | |
/// Construct Thumb-1 instruction info; all shared state lives in the
/// ARMBaseInstrInfo base, which is initialized from the subtarget.
Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}
27 | |
28 | /// Return the noop instruction to use for a noop. |
29 | MCInst Thumb1InstrInfo::getNop() const { |
30 | return MCInstBuilder(ARM::tMOVr) |
31 | .addReg(ARM::R8) |
32 | .addReg(ARM::R8) |
33 | .addImm(ARMCC::AL) |
34 | .addReg(0); |
35 | } |
36 | |
// Thumb-1 has no pre/post-indexed load/store forms to convert from, so
// there is never an equivalent unindexed opcode; 0 signals "none".
unsigned Thumb1InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  return 0;
}
40 | |
41 | void Thumb1InstrInfo::copyPhysReg(MachineBasicBlock &MBB, |
42 | MachineBasicBlock::iterator I, |
43 | const DebugLoc &DL, MCRegister DestReg, |
44 | MCRegister SrcReg, bool KillSrc) const { |
45 | // Need to check the arch. |
46 | MachineFunction &MF = *MBB.getParent(); |
47 | const ARMSubtarget &st = MF.getSubtarget<ARMSubtarget>(); |
48 | |
49 | assert(ARM::GPRRegClass.contains(DestReg, SrcReg) && |
50 | "Thumb1 can only copy GPR registers" ); |
51 | |
52 | if (st.hasV6Ops() || ARM::hGPRRegClass.contains(SrcReg) || |
53 | !ARM::tGPRRegClass.contains(DestReg)) |
54 | BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg) |
55 | .addReg(SrcReg, getKillRegState(B: KillSrc)) |
56 | .add(predOps(Pred: ARMCC::AL)); |
57 | else { |
58 | const TargetRegisterInfo *RegInfo = st.getRegisterInfo(); |
59 | LiveRegUnits UsedRegs(*RegInfo); |
60 | UsedRegs.addLiveOuts(MBB); |
61 | |
62 | auto InstUpToI = MBB.end(); |
63 | while (InstUpToI != I) |
64 | // The pre-decrement is on purpose here. |
65 | // We want to have the liveness right before I. |
66 | UsedRegs.stepBackward(MI: *--InstUpToI); |
67 | |
68 | if (UsedRegs.available(ARM::Reg: CPSR)) { |
69 | BuildMI(MBB, I, DL, get(ARM::tMOVSr), DestReg) |
70 | .addReg(SrcReg, getKillRegState(B: KillSrc)) |
71 | ->addRegisterDead(ARM::CPSR, RegInfo); |
72 | return; |
73 | } |
74 | |
75 | // Use high register to move source to destination |
76 | // if movs is not an option. |
77 | BitVector Allocatable = RegInfo->getAllocatableSet( |
78 | MF, RC: RegInfo->getRegClass(ARM::i: hGPRRegClassID)); |
79 | |
80 | Register TmpReg = ARM::NoRegister; |
81 | // Prefer R12 as it is known to not be preserved anyway |
82 | if (UsedRegs.available(ARM::Reg: R12) && Allocatable.test(ARM::R12)) { |
83 | TmpReg = ARM::R12; |
84 | } else { |
85 | for (Register Reg : Allocatable.set_bits()) { |
86 | if (UsedRegs.available(Reg)) { |
87 | TmpReg = Reg; |
88 | break; |
89 | } |
90 | } |
91 | } |
92 | |
93 | if (TmpReg) { |
94 | BuildMI(MBB, I, DL, get(ARM::tMOVr), TmpReg) |
95 | .addReg(SrcReg, getKillRegState(B: KillSrc)) |
96 | .add(predOps(Pred: ARMCC::AL)); |
97 | BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg) |
98 | .addReg(TmpReg, getKillRegState(B: true)) |
99 | .add(predOps(Pred: ARMCC::AL)); |
100 | return; |
101 | } |
102 | |
103 | // 'MOV lo, lo' is unpredictable on < v6, so use the stack to do it |
104 | BuildMI(MBB, I, DL, get(ARM::tPUSH)) |
105 | .add(predOps(Pred: ARMCC::AL)) |
106 | .addReg(SrcReg, getKillRegState(B: KillSrc)); |
107 | BuildMI(MBB, I, DL, get(ARM::tPOP)) |
108 | .add(predOps(Pred: ARMCC::AL)) |
109 | .addReg(DestReg, getDefRegState(B: true)); |
110 | } |
111 | } |
112 | |
113 | void Thumb1InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, |
114 | MachineBasicBlock::iterator I, |
115 | Register SrcReg, bool isKill, int FI, |
116 | const TargetRegisterClass *RC, |
117 | const TargetRegisterInfo *TRI, |
118 | Register VReg) const { |
119 | assert((RC == &ARM::tGPRRegClass || |
120 | (SrcReg.isPhysical() && isARMLowRegister(SrcReg))) && |
121 | "Unknown regclass!" ); |
122 | |
123 | if (RC == &ARM::tGPRRegClass || |
124 | (SrcReg.isPhysical() && isARMLowRegister(Reg: SrcReg))) { |
125 | DebugLoc DL; |
126 | if (I != MBB.end()) DL = I->getDebugLoc(); |
127 | |
128 | MachineFunction &MF = *MBB.getParent(); |
129 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
130 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
131 | PtrInfo: MachinePointerInfo::getFixedStack(MF, FI), F: MachineMemOperand::MOStore, |
132 | Size: MFI.getObjectSize(ObjectIdx: FI), BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI)); |
133 | BuildMI(MBB, I, DL, get(ARM::tSTRspi)) |
134 | .addReg(SrcReg, getKillRegState(B: isKill)) |
135 | .addFrameIndex(FI) |
136 | .addImm(0) |
137 | .addMemOperand(MMO) |
138 | .add(predOps(Pred: ARMCC::AL)); |
139 | } |
140 | } |
141 | |
142 | void Thumb1InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, |
143 | MachineBasicBlock::iterator I, |
144 | Register DestReg, int FI, |
145 | const TargetRegisterClass *RC, |
146 | const TargetRegisterInfo *TRI, |
147 | Register VReg) const { |
148 | assert((RC->hasSuperClassEq(&ARM::tGPRRegClass) || |
149 | (DestReg.isPhysical() && isARMLowRegister(DestReg))) && |
150 | "Unknown regclass!" ); |
151 | |
152 | if (RC->hasSuperClassEq(RC: &ARM::tGPRRegClass) || |
153 | (DestReg.isPhysical() && isARMLowRegister(Reg: DestReg))) { |
154 | DebugLoc DL; |
155 | if (I != MBB.end()) DL = I->getDebugLoc(); |
156 | |
157 | MachineFunction &MF = *MBB.getParent(); |
158 | MachineFrameInfo &MFI = MF.getFrameInfo(); |
159 | MachineMemOperand *MMO = MF.getMachineMemOperand( |
160 | PtrInfo: MachinePointerInfo::getFixedStack(MF, FI), F: MachineMemOperand::MOLoad, |
161 | Size: MFI.getObjectSize(ObjectIdx: FI), BaseAlignment: MFI.getObjectAlign(ObjectIdx: FI)); |
162 | BuildMI(MBB, I, DL, get(ARM::tLDRspi), DestReg) |
163 | .addFrameIndex(FI) |
164 | .addImm(0) |
165 | .addMemOperand(MMO) |
166 | .add(predOps(Pred: ARMCC::AL)); |
167 | } |
168 | } |
169 | |
170 | void Thumb1InstrInfo::expandLoadStackGuard( |
171 | MachineBasicBlock::iterator MI) const { |
172 | MachineFunction &MF = *MI->getParent()->getParent(); |
173 | const ARMSubtarget &ST = MF.getSubtarget<ARMSubtarget>(); |
174 | const auto *GV = cast<GlobalValue>(Val: (*MI->memoperands_begin())->getValue()); |
175 | |
176 | assert(MF.getFunction().getParent()->getStackProtectorGuard() != "tls" && |
177 | "TLS stack protector not supported for Thumb1 targets" ); |
178 | |
179 | unsigned Instr; |
180 | if (!GV->isDSOLocal()) |
181 | Instr = ARM::tLDRLIT_ga_pcrel; |
182 | else if (ST.genExecuteOnly() && ST.hasV8MBaselineOps()) |
183 | Instr = ARM::t2MOVi32imm; |
184 | else if (ST.genExecuteOnly()) |
185 | Instr = ARM::tMOVi32imm; |
186 | else |
187 | Instr = ARM::tLDRLIT_ga_abs; |
188 | expandLoadStackGuardBase(MI, LoadImmOpc: Instr, ARM::LoadOpc: tLDRi); |
189 | } |
190 | |
191 | bool Thumb1InstrInfo::canCopyGluedNodeDuringSchedule(SDNode *N) const { |
192 | // In Thumb1 the scheduler may need to schedule a cross-copy between GPRS and CPSR |
193 | // but this is not always possible there, so allow the Scheduler to clone tADCS and tSBCS |
194 | // even if they have glue. |
195 | // FIXME. Actually implement the cross-copy where it is possible (post v6) |
196 | // because these copies entail more spilling. |
197 | unsigned Opcode = N->getMachineOpcode(); |
198 | if (Opcode == ARM::tADCS || Opcode == ARM::tSBCS) |
199 | return true; |
200 | |
201 | return false; |
202 | } |
203 | |