//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
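/// For example (illustrative only, assuming MaxInstLength == 4 and "#" as
/// the comment string), the asm string
///   "add x0, x0, x1\n.space 100\n# note"
/// measures 4 bytes for the instruction, 100 for the .space directive, and
/// 0 for the comment, i.e. 104 in total.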
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str,
    const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction at Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If NewDest isn't the block immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

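// For example (an illustrative sketch of the contract below): given
// CommutableOpIdx1 == 1 and CommutableOpIdx2 == 2, a caller passing
// ResultIdx1 == CommuteAnyOperandIndex and ResultIdx2 == 2 gets ResultIdx1
// fixed to 1, while a caller passing ResultIdx2 == 3 is rejected (returns
// false) because operand 3 is not one of the commutable operands.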
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

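// As an illustrative example for the function below: given a register class
// with a 16-byte spill size and a sub-register index covering bits [32, 64),
// Size becomes 4 and Offset becomes 4 on a little-endian target, or 8 on a
// big-endian one, where the offset is counted from the other end of the slot.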
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &
TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertBefore,
                           const MachineInstr &Orig) const {
  MachineFunction &MF = *MBB.getParent();
  // CFI instructions are marked as non-duplicable, because Darwin compact
  // unwind info emission can't handle multiple prologue setups.
  assert((!Orig.isNotDuplicable() ||
          (!MF.getTarget().getTargetTriple().isOSDarwin() &&
           Orig.isCFIInstruction())) &&
         "Instruction cannot be duplicated");

  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
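// For example (a sketch): given '%dst = COPY %src', folding operand 0 lets
// the caller replace the COPY with a store of %src to the stack slot, while
// folding operand 1 yields a load into %dst; see the COPY handling at the
// end of foldMemoryOperand() below.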
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable.
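    // (For example: the pair returned below is (0, getVarIdx()), i.e. the
    // leading ID and shadow-bytes operands are unfoldable, while the live
    // values that follow may be folded into stack references.)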
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // stackmap/patchpoint/statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

static void foldInlineAsmMemOperand(MachineInstr *MI, unsigned OpNo, int FI,
                                    const TargetInstrInfo &TII) {
  // If the machine operand is tied, untie it first.
  if (MI->getOperand(OpNo).isTied()) {
    unsigned TiedTo = MI->findTiedOperandIdx(OpNo);
    MI->untieRegOperand(OpNo);
    // Intentional recursion!
    foldInlineAsmMemOperand(MI, TiedTo, FI, TII);
  }

  SmallVector<MachineOperand, 5> NewOps;
  TII.getFrameIndexOperands(NewOps, FI);
  assert(!NewOps.empty() && "getFrameIndexOperands didn't create any operands");
  MI->removeOperand(OpNo);
  MI->insert(MI->operands_begin() + OpNo, NewOps);

  // Change the previous operand to a MemKind InlineAsm::Flag. The second param
  // is the per-target number of operands that represent the memory operand
  // excluding this one (MD). This includes MO.
  InlineAsm::Flag F(InlineAsm::Kind::Mem, NewOps.size());
  F.setMemConstraint(InlineAsm::ConstraintCode::m);
  MachineOperand &MD = MI->getOperand(OpNo - 1);
  MD.setImm(F);
}

// Returns nullptr if not possible to fold.
static MachineInstr *foldInlineAsmMemOperand(MachineInstr &MI,
                                             ArrayRef<unsigned> Ops, int FI,
                                             const TargetInstrInfo &TII) {
  assert(MI.isInlineAsm() && "wrong opcode");
  if (Ops.size() > 1)
    return nullptr;
  unsigned Op = Ops[0];
  assert(Op && "should never be first operand");
  assert(MI.getOperand(Op).isReg() && "shouldn't be folding non-reg operands");

  if (!MI.mayFoldInlineAsmRegOp(Op))
    return nullptr;

  MachineInstr &NewMI = TII.duplicate(*MI.getParent(), MI.getIterator(), MI);

  foldInlineAsmMemOperand(&NewMI, Op, FI, TII);

  // Update mayload/maystore metadata, and memoperands.
  const VirtRegInfo &RI =
      AnalyzeVirtRegInBundle(MI, MI.getOperand(Op).getReg());
  MachineOperand &ExtraMO = NewMI.getOperand(InlineAsm::MIOp_ExtraInfo);
  MachineMemOperand::Flags Flags = MachineMemOperand::MONone;
  if (RI.Reads) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayLoad);
    Flags |= MachineMemOperand::MOLoad;
  }
  if (RI.Writes) {
    ExtraMO.setImm(ExtraMO.getImm() | InlineAsm::Extra_MayStore);
    Flags |= MachineMemOperand::MOStore;
  }
  MachineFunction *MF = NewMI.getMF();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), Flags, MFI.getObjectSize(FI),
      MFI.getObjectAlign(FI));
  NewMI.addMemOperand(*MF, MMO);

  return &NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else if (MI.isInlineAsm()) {
    return foldInlineAsmMemOperand(MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy them from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // A straight COPY may fold as a load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else if (MI.isInlineAsm() && isLoadFromStackSlot(LoadMI, FrameIndex)) {
    return foldInlineAsmMemOperand(MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was an implicit kill of a super-register overlapping
    // the copy result, we would be killing the subregisters that previous
    // copies defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill());

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change the operand order. In this case the
  // opcodes are not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that
  // the instructions' opcodes are equal or one of them is the inversion of
  // the other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  // REASSOC_AX_BY:
  //   (A + X) + Y => A + (X + Y)
  //   (A + X) - Y => A + (X - Y)
  //   (A - X) + Y => A - (X - Y)
  //   (A - X) - Y => A - (X + Y)
  // REASSOC_XA_BY:
  //   (X + A) + Y => (X + Y) + A
  //   (X + A) - Y => (X - Y) + A
  //   (X - A) + Y => (X + Y) - A
  //   (X - A) - Y => (X - Y) - A
  // REASSOC_AX_YB:
  //   Y + (A + X) => (Y + X) + A
  //   Y - (A + X) => (Y - X) - A
  //   Y + (A - X) => (Y - X) + A
  //   Y - (A - X) => (Y + X) - A
  // REASSOC_XA_YB:
  //   Y + (X + A) => (Y + X) + A
  //   Y - (X + A) => (Y - X) - A
  //   Y + (X - A) => (Y + X) - A
  //   Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See the visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(KillNewVR));

  // Propagate FP flags from the original instructions.
  // But clear poison-generating flags because those may not be valid now.
  // TODO: There should be a helper function for copying only fast-math-flags.
  uint32_t IntersectedFlags = Root.getFlags() & Prev.getFlags();
  MIB1->setFlags(IntersectedFlags);
  MIB1->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB1->clearFlag(MachineInstr::MIFlag::IsExact);

  MIB2->setFlags(IntersectedFlags);
  MIB2->clearFlag(MachineInstr::MIFlag::NoSWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::NoUWrap);
  MIB2->clearFlag(MachineInstr::MIFlag::IsExact);

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    llvm_unreachable("Unknown pattern for machine combiner");
  }

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

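// For example (illustrative): on a target whose stack grows down, a
// call-frame setup instruction reserving 16 bytes yields +16 and the
// matching call-frame destroy yields -16, so summing getSPAdjust() over a
// block tracks the net stack-pointer adjustment.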
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block.
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

std::optional<unsigned>
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return std::nullopt;

  if (!DefNode->isMachineOpcode())
    return std::nullopt;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
1454
1455bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
1456 const MachineInstr &DefMI,
1457 unsigned DefIdx) const {
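  // Treat a def as having low latency when the itinerary reports an operand
  // cycle of at most 1 for it.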
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  std::optional<unsigned> DefCycle =
      ItinData->getOperandCycle(DefClass, DefIdx);
  return DefCycle && DefCycle <= 1U;
}

bool TargetInstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {
  // TODO: We don't split functions where a section attribute has been set
  // since the split part may not be placed in a contiguous region. It may also
  // be more beneficial to augment the linker to ensure contiguous layout of
  // split functions within the same section as specified by the attribute.
  if (MF.getFunction().hasSection() ||
      MF.getFunction().hasFnAttribute("implicit-section-name"))
    return false;

  // We don't want to proceed further for cold functions or functions of
  // unknown hotness. Lukewarm functions have no prefix.
  std::optional<StringRef> SectionPrefix = MF.getFunction().getSectionPrefix();
  if (SectionPrefix &&
      (*SectionPrefix == "unlikely" || *SectionPrefix == "unknown")) {
    return false;
  }

  return true;
}

std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Otherwise the copy does not define the forwarding reg in a way we can
    // describe; give up.
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
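    // Describe the value as the add's source register plus a DWARF offset.
    // Illustrative (AArch64-style):
    //
    //   x0 = ADDXri x7, 16, 0
    //   call callee(x0)      ; x0 described as x7 + 16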
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    // ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg,
    //          implicit-def dead $rax, implicit-def $rdx
    // ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

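    // Build an expression that dereferences [BaseOp + Offset] with the access
    // size: an offset (DW_OP_plus_uconst when positive, via appendOffset)
    // followed by DW_OP_deref_size <size of the memory operand>.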
    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}

// Get the call frame size just before MI.
unsigned TargetInstrInfo::getCallFrameSizeAt(MachineInstr &MI) const {
  // Search backwards from MI for the most recent call frame instruction.
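  // Illustrative (the setup/destroy pseudos are target-specific, commonly
  // ADJCALLSTACKDOWN/ADJCALLSTACKUP):
  //
  //   ADJCALLSTACKDOWN 32, ...
  //   <MI>                     ; call frame size at MI is 32
  //   ADJCALLSTACKUP 32, ...   ; after this, the size is 0 again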
  MachineBasicBlock *MBB = MI.getParent();
  for (auto &AdjI : reverse(make_range(MBB->instr_begin(), MI.getIterator()))) {
    if (AdjI.getOpcode() == getCallFrameSetupOpcode())
      return getFrameTotalSize(AdjI);
    if (AdjI.getOpcode() == getCallFrameDestroyOpcode())
      return 0;
  }

  // If none was found, use the call frame size from the start of the basic
  // block.
  return MBB->getCallFrameSize();
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary, which must also be valid. This may be overridden by the target.
std::optional<unsigned> TargetInstrInfo::getOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  //   Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
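  // and record one (Reg, SubReg, SubIdx) triple per input pair, skipping
  // undef inputs: (v0, v0's subreg, sub0), (v1, v1's subreg, sub1), and so on.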
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  //   Def = EXTRACT_SUBREG v0.sub1, sub0.
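  // The result is InputReg = (v0, sub1, sub0): sub0 is extracted from the
  // value read as v0.sub1.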
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  //   Def = INSERT_SUBREG v0, v1, sub0.
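  // The result is BaseReg = (v0, v0's subreg) and InsertedReg = (v1, v1's
  // subreg, sub0): v1 is inserted into v0 at subregister index sub0.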
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
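    // (names as returned by InlineAsm::getExtraInfoNames), producing comments
    // such as "sideeffect mayload" (illustrative).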
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  const InlineAsm::Flag F(Flag);
  OS << F.getKindName();

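  // The kind name is a string such as "regdef" or "mem" (illustrative); the
  // checks below append optional register-class, memory-constraint, and
  // tied-operand details.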
  unsigned RCID;
  if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (F.isMemKind()) {
    InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo;
  if (F.isUseOperandTiedToDef(TiedTo))
    OS << " tiedto:$" << TiedTo;

  if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isRegUseKind()) &&
      F.getRegMayBeFolded())
    OS << " foldable";

  return OS.str();
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

outliner::InstrType TargetInstrInfo::getOutliningType(
    MachineBasicBlock::iterator &MIT, unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
  // have support for outlining those. Special-case that here.
  if (MI.isCFIInstruction())
    // Just go right to the target implementation.
    return getOutliningTypeImpl(MIT, Flags);

  // Be conservative about inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Labels generally can't safely be outlined.
  if (MI.isLabel())
    return outliner::InstrType::Illegal;

  // Don't let debug instructions impact analysis.
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible;

  // Some other special cases.
  switch (MI.getOpcode()) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::LIFETIME_START:
  case TargetOpcode::LIFETIME_END:
    return outliner::InstrType::Invisible;
  default:
    break;
  }

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // If this is a branch to another block, we can't outline it.
    if (!MI.getParent()->succ_empty())
      return outliner::InstrType::Illegal;

    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands of this instruction do anything that
  // might break if they're moved outside their current function.
  // This includes MachineBasicBlock references, BlockAddresses,
  // constant pool indices and jump table indices.
  //
  // A quick note on MO_TargetIndex:
  // This doesn't seem to be used in any of the architectures that the
  // MachineOutliner supports, but it was still filtered out in all of them.
  // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
  // As such, this check is removed both here and in the target-specific
  // implementations. Instead, we assert to make sure this doesn't
  // catch anyone off-guard somewhere down the line.
  for (const MachineOperand &MOP : MI.operands()) {
    // If you hit this assertion, please remove it and adjust
    // `getOutliningTypeImpl` for your target appropriately if necessary.
    // Adding the assertion back to other supported architectures
    // would be nice too :)
    assert(!MOP.isTargetIndex() && "This isn't used quite yet!");

    // CFI instructions should already have been filtered out at this point.
    assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");

    // PrologEpilogInserter should've already run at this point.
    assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");

    if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
      return outliner::InstrType::Illegal;
  }

  // If we don't know, delegate to the target-specific hook.
  return getOutliningTypeImpl(MIT, Flags);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentation passes emit special pseudo-instructions at the start
  // which expand to code sequences that must remain present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First == MBB.end())
    return true;

  if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
      First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
    return false;

  // Some instrumentation passes emit special pseudo-instructions at or just
  // before the end that must likewise remain present.
  auto Last = MBB.getLastNonDebugInstr();
  if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
      Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
    return false;

  if (Last != First && Last->isReturn()) {
    --Last;
    if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
        Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
      return false;
  }
  return true;
}
