//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
  bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts. It may rewrite
  // the instruction in place.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI, MachineRegisterInfo &MRI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSExtInreg(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB,
                    MachineRegisterInfo &MRI) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB,
                       MachineRegisterInfo &MRI) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectMergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root) const;
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                 int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;
  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

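// ComplexPattern matcher for shift-amount operands: because only the low
// log2(ShiftWidth) bits of the amount are consumed, redundant zero-extends,
// ANDs with (ShiftWidth - 1), and adds/subs of multiples of the shift width
// can be stripped before selection.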
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;
  MachineRegisterInfo &MRI = MF->getRegInfo();

  Register RootReg = Root.getReg();
  Register ShAmtReg = RootReg;
  const LLT ShiftLLT = MRI.getType(RootReg);
  unsigned ShiftWidth = ShiftLLT.getSizeInBits();
  assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {
    ShAmtReg = ZExtSrcReg;
  }

  APInt AndMask;
  Register AndSrcReg;
  if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = KB->getKnownBits(ShAmtReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
      // to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

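// ComplexPattern matcher for the non-shift operand of SHXADD (Zba
// sh1add/sh2add/sh3add): looks for shift-and-mask combinations that can
// instead be formed with a single SRLI/SRLIW feeding the SHXADD.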
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

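// ComplexPattern matcher for the non-shift operand of SHXADD_UW (Zba
// sh1add.uw/sh2add.uw/sh3add.uw): matches (and (shl x, c2), mask) forms that
// can be re-expressed as an SLLI feeding the SHXADD_UW.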
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros, we can use an SLLI by
  // c2 - ShAmt followed by a SHXADD_UW with ShAmt as the shift amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

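// ComplexPattern matcher for load/store addressing: selects a base register
// plus a signed 12-bit immediate offset, folding frame indices and
// base-plus-constant pointer arithmetic where possible.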
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  MachineFunction &MF = *Root.getParent()->getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// CC. CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

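// Turn the condition register of a G_BRCOND/G_SELECT into a (CC, LHS, RHS)
// triple usable by RISC-V compare-and-branch instructions, folding a feeding
// G_ICMP when possible and canonicalizing predicates that the branches do not
// support directly.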
static void getOperandsForBranch(Register CondReg, MachineRegisterInfo &MRI,
                                 RISCVCC::CondCode &CC, Register &LHS,
                                 Register &RHS) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalizations.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches; swap LHS and
    // RHS and use the swapped predicate instead.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
  return;
}

bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB, MRI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI.getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI, MRI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
    return selectCopy(MI, MRI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use the constant pool for complex constants.
    // TODO: Optimize +0.0 to use fcvt.d.w for s64 on rv32.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    APInt Imm = FPimm.bitcastToAPInt();
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    if (Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X : RISCV::FMV_W_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, MRI, GV->isDSOLocal(),
                      GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), MRI, CC, LHS, RHS);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRJT: {
    // FIXME: Move to legalization?
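    // Expand the jump table dispatch inline: scale the index by the entry
    // size, add the table base, load the entry (adding the table address
    // again for label-difference entries), then branch indirectly.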
    const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
    unsigned EntrySize = MJTI->getEntrySize(MF.getDataLayout());
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    assert(
        (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_Custom32 ||
         MJTI->getEntryKind() == MachineJumpTableInfo::EK_BlockAddress) &&
        "Unexpected jump-table entry kind");

    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // TODO: Use SHXADD. Moving to legalization would fix this automatically.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
            .addImm(0)
            .addMemOperand(MF.getMachineMemOperand(
                MachinePointerInfo::getJumpTable(MF), MachineMemOperand::MOLoad,
                EntrySize, Align(MJTI->getEntryAlignment(MF.getDataLayout()))));
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;

    // If the Kind is EK_LabelDifference32, the table stores an offset from
    // the location of the table. Add the table address to get an absolute
    // address.
    if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32) {
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    auto Branch =
        MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    if (!Branch.constrainAllUses(TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // TODO: We may want to replace this code with the SelectionDAG patterns,
    // which fail to get imported because they use FrameAddrRegImm, which is a
    // ComplexPattern.
    MI.setDesc(TII.get(RISCV::ADDI));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB, MRI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB, MRI);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB, MRI);
  default:
    return false;
  }
}

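// G_MERGE_VALUES is only handled here for assembling an FP64 value from two
// GPR halves (RV32 with the D extension); it is selected to the
// BuildPairF64Pseudo.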
bool RISCVInstructionSelector::selectMergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an FP64 pair from the operands.
  if (MI.getNumOperands() != 3)
    return false;
  Register Dst = MI.getOperand(0).getReg();
  Register Lo = MI.getOperand(1).getReg();
  Register Hi = MI.getOperand(2).getReg();
  if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split the FP64 Src into two s32 parts.
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

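// Rewrite pointer-typed generic operations into their integer equivalents
// before selection: G_PTR_ADD becomes G_ADD and G_PTRMASK becomes G_AND on
// sXLen, with the pointer operand converted via G_PTRTOINT.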
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI.setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI.setType(DstReg, sXLen);
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &MI,
                                         int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal);
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI,
                                          MachineRegisterInfo &MRI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

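// Materialize an arbitrary XLen immediate into DstReg using the
// LUI/ADDI(W)/SLLI/... sequence computed by RISCVMatInt, constraining each
// emitted instruction as it is built.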
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  MachineRegisterInfo &MRI = *MIB.getMRI();

  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

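// Select the address of a global value, jump table, or constant pool entry.
// The addressing sequence depends on PIC, the code model, and whether the
// symbol is DSO-local or extern-weak: PseudoLLA, PseudoLGA (GOT load), or
// LUI+ADDI.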
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB,
                                          MachineRegisterInfo &MRI,
                                          bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI.getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled, they
  // should be accessed via the GOT, since the tagged address of a global is
  // incompatible with existing code models. This also applies to non-PIC
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(const_cast<MachineFunction &>(*MF), *TPC, *MORE,
                       getName(), "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
      return false;

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          MachinePointerInfo::getGOT(MF),
          MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
              MachineMemOperand::MOInvariant,
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
        return false;

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}

bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  if (!STI.isRV64())
    return false;

  const MachineOperand &Size = MI.getOperand(2);
  // Only Size == 32 (i.e. shift by 32 bits) is acceptable at this point.
  if (!Size.isImm() || Size.getImm() != 32)
    return false;

  const MachineOperand &Src = MI.getOperand(1);
  const MachineOperand &Dst = MI.getOperand(0);
  // addiw rd, rs, 0 (i.e. sext.w rd, rs)
  MachineInstr *NewMI =
      MIB.buildInstr(RISCV::ADDIW, {Dst.getReg()}, {Src.getReg()}).addImm(0U);

  if (!constrainSelectedInstRegOperands(*NewMI, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

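// G_SELECT is selected into one of the Select_*_Using_CC_GPR pseudos; the
// condition is first folded into a (CC, LHS, RHS) compare by
// getOperandsForBranch, and the pseudo is expanded into a branch-based
// sequence after instruction selection.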
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), MRI, CC, LHS, RHS);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}

// Convert an FCMP predicate to one of the supported F or D instructions.
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 32 || Size == 64) && "Unsupported size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  }
}

// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}

// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
// the result in DstReg.
// FIXME: Maybe we should expand this earlier.
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB,
                                               MachineRegisterInfo &MRI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI.getType(LHS).getSizeInBits();
  assert((Size == 32 || Size == 64) && "Unexpected size");

  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try swapping operands or inverting.
  if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    // FIXME: If LHS and RHS are the same we can use a single FEQ.
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

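// Lower G_FENCE. With Ztso, only a sequentially-consistent cross-thread fence
// needs a real FENCE instruction; single-thread fences and everything else
// under Ztso become a MEMBARRIER (a compiler-only barrier). Otherwise the
// predecessor/successor sets are chosen per the mapping table referenced
// below.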
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      MIB.buildInstr(RISCV::FENCE, {}, {})
          .addImm(RISCVFenceField::R | RISCVFenceField::W)
          .addImm(RISCVFenceField::R | RISCVFenceField::W);
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    Pred = RISCVFenceField::R | RISCVFenceField::W;
    Succ = RISCVFenceField::R | RISCVFenceField::W;
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}

namespace llvm {
InstructionSelector *
createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                               RISCVSubtarget &Subtarget,
                               RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm