//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
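//
// Example invocations (a sketch; both entry points are defined in or used by
// this file):
//   $ llc -verify-machineinstrs input.ll
//   MF.verify(P, "After MyPass");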
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>

using namespace llvm;

namespace {

  struct MachineVerifier {
    MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

    MachineVerifier(const char *b, LiveVariables *LiveVars,
                    LiveIntervals *LiveInts, LiveStacks *LiveStks,
                    SlotIndexes *Indexes)
        : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts),
          LiveStks(LiveStks), Indexes(Indexes) {}

    unsigned verify(const MachineFunction &MF);

    Pass *const PASS = nullptr;
    const char *Banner;
    const MachineFunction *MF = nullptr;
    const TargetMachine *TM = nullptr;
    const TargetInstrInfo *TII = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    const MachineRegisterInfo *MRI = nullptr;
    const RegisterBankInfo *RBI = nullptr;

    unsigned foundErrors = 0;

    // Avoid querying the MachineFunctionProperties for each operand.
    bool isFunctionRegBankSelected = false;
    bool isFunctionSelected = false;
    bool isFunctionTracksDebugUserValues = false;

    using RegVector = SmallVector<Register, 16>;
    using RegMaskVector = SmallVector<const uint32_t *, 4>;
    using RegSet = DenseSet<Register>;
    using RegMap = DenseMap<Register, const MachineInstr *>;
    using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;

    const MachineInstr *FirstNonPHI = nullptr;
    const MachineInstr *FirstTerminator = nullptr;
    BlockSet FunctionBlocks;

    BitVector regsReserved;
    RegSet regsLive;
    RegVector regsDefined, regsDead, regsKilled;
    RegMaskVector regMasks;

    SlotIndex lastIndex;

    // Add Reg and any sub-registers to RV.
    void addRegWithSubRegs(RegVector &RV, Register Reg) {
      RV.push_back(Reg);
      if (Reg.isPhysical())
        append_range(RV, TRI->subregs(Reg.asMCReg()));
    }
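    // Note that for a physical register the call above records every
    // sub-register as well; to illustrate (on an X86-like target), adding
    // EAX would also add AX, AH, and AL.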

    struct BBInfo {
      // Is this MBB reachable from the MF entry point?
      bool reachable = false;

      // Vregs that must be live in because they are used without being
      // defined. Map value is the user. vregsLiveIn doesn't include regs
      // that are only used by PHI nodes.
      RegMap vregsLiveIn;

      // Regs killed in MBB. They may be defined again, and will then be in
      // both regsKilled and regsLiveOut.
      RegSet regsKilled;

      // Regs defined in MBB and live out. Note that vregs passing through may
      // be live out without being mentioned here.
      RegSet regsLiveOut;

      // Vregs that pass through MBB untouched. This set is disjoint from
      // regsKilled and regsLiveOut.
      RegSet vregsPassed;

      // Vregs that must pass through MBB because they are needed by a
      // successor block. This set is disjoint from regsLiveOut.
      RegSet vregsRequired;

      // Set versions of block's predecessor and successor lists.
      BlockSet Preds, Succs;

      BBInfo() = default;

      // Add register to vregsRequired if it belongs there. Return true if
      // anything changed.
      bool addRequired(Register Reg) {
        if (!Reg.isVirtual())
          return false;
        if (regsLiveOut.count(Reg))
          return false;
        return vregsRequired.insert(Reg).second;
      }

      // Same for a full set.
      bool addRequired(const RegSet &RS) {
        bool Changed = false;
        for (Register Reg : RS)
          Changed |= addRequired(Reg);
        return Changed;
      }

      // Same for a full map.
      bool addRequired(const RegMap &RM) {
        bool Changed = false;
        for (const auto &I : RM)
          Changed |= addRequired(I.first);
        return Changed;
      }

      // Live-out registers are either in regsLiveOut or vregsPassed.
      bool isLiveOut(Register Reg) const {
        return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
      }
    };
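    // A sketch of how the addRequired overloads are used: calcRegsRequired()
    // (declared below) propagates vregsRequired from each block to its
    // predecessors, re-running while any addRequired call reports a change,
    // until a fixed point is reached.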

    // Extra register info per MBB.
    DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;

    bool isReserved(Register Reg) {
      return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
    }

    bool isAllocatable(Register Reg) const {
      return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
             !regsReserved.test(Reg.id());
    }

    // Analysis information if available.
    LiveVariables *LiveVars = nullptr;
    LiveIntervals *LiveInts = nullptr;
    LiveStacks *LiveStks = nullptr;
    SlotIndexes *Indexes = nullptr;

    void visitMachineFunctionBefore();
    void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
    void visitMachineBundleBefore(const MachineInstr *MI);

    /// Verify that all of \p MI's virtual register operands are scalars.
    /// \returns True if all virtual register operands are scalar. False
    /// otherwise.
    bool verifyAllRegOpsScalar(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI);
    bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

    bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
    bool verifyGIntrinsicConvergence(const MachineInstr *MI);
    void verifyPreISelGenericInstruction(const MachineInstr *MI);

    void visitMachineInstrBefore(const MachineInstr *MI);
    void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
    void visitMachineBundleAfter(const MachineInstr *MI);
    void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
    void visitMachineFunctionAfter();

    void report(const char *msg, const MachineFunction *MF);
    void report(const char *msg, const MachineBasicBlock *MBB);
    void report(const char *msg, const MachineInstr *MI);
    void report(const char *msg, const MachineOperand *MO, unsigned MONum,
                LLT MOVRegType = LLT{});
    void report(const Twine &Msg, const MachineInstr *MI);

    void report_context(const LiveInterval &LI) const;
    void report_context(const LiveRange &LR, Register VRegUnit,
                        LaneBitmask LaneMask) const;
    void report_context(const LiveRange::Segment &S) const;
    void report_context(const VNInfo &VNI) const;
    void report_context(SlotIndex Pos) const;
    void report_context(MCPhysReg PhysReg) const;
    void report_context_liverange(const LiveRange &LR) const;
    void report_context_lanemask(LaneBitmask LaneMask) const;
    void report_context_vreg(Register VReg) const;
    void report_context_vreg_regunit(Register VRegOrUnit) const;

    void verifyInlineAsm(const MachineInstr *MI);

    void checkLiveness(const MachineOperand *MO, unsigned MONum);
    void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                            SlotIndex UseIdx, const LiveRange &LR,
                            Register VRegOrUnit,
                            LaneBitmask LaneMask = LaneBitmask::getNone());
    void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                            SlotIndex DefIdx, const LiveRange &LR,
                            Register VRegOrUnit, bool SubRangeCheck = false,
                            LaneBitmask LaneMask = LaneBitmask::getNone());

    void markReachable(const MachineBasicBlock *MBB);
    void calcRegsPassed();
    void checkPHIOps(const MachineBasicBlock &MBB);

    void calcRegsRequired();
    void verifyLiveVariables();
    void verifyLiveIntervals();
    void verifyLiveInterval(const LiveInterval &);
    void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                              LaneBitmask);
    void verifyLiveRangeSegment(const LiveRange &,
                                const LiveRange::const_iterator I, Register,
                                LaneBitmask);
    void verifyLiveRange(const LiveRange &, Register,
                         LaneBitmask LaneMask = LaneBitmask::getNone());

    void verifyStackFrame();

    void verifySlotIndexes() const;
    void verifyProperties(const MachineFunction &MF);
  };

  struct MachineVerifierPass : public MachineFunctionPass {
    static char ID; // Pass ID, replacement for typeid

    const std::string Banner;

    MachineVerifierPass(std::string banner = std::string())
        : MachineFunctionPass(ID), Banner(std::move(banner)) {
      initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addUsedIfAvailable<LiveStacks>();
      AU.addUsedIfAvailable<LiveVariables>();
      AU.addUsedIfAvailable<SlotIndexes>();
      AU.addUsedIfAvailable<LiveIntervals>();
      AU.setPreservesAll();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Skip functions that have known verification problems.
      // FIXME: Remove this mechanism when all problematic passes have been
      // fixed.
      if (MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::FailsVerification))
        return false;

      unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
      if (FoundErrors)
        report_fatal_error("Found " + Twine(FoundErrors) +
                           " machine code errors.");
      return false;
    }
  };

} // end anonymous namespace

char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}
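
// A minimal sketch of scheduling the verifier with the legacy pass manager
// (the PassManager setup around it is assumed, not part of this file):
//
//   legacy::PassManager PM;
//   PM.add(createMachineVerifierPass("After MyPass"));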

void llvm::verifyMachineFunction(const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) +
                       " machine code errors.");
}

bool MachineFunction::verify(Pass *p, const char *Banner,
                             bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) +
                       " machine code errors.");
  return FoundErrors == 0;
}

bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors =
      MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) +
                       " machine code errors.");
  return FoundErrors == 0;
}
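
// These overloads are also convenient from a debugger, e.g. (illustrative):
//   (gdb) call MF.verify(nullptr, "manual check", false)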

void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
                                     E = Indexes->MBBIndexEnd();
       I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}
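
// For reference: a SlotIndex prints as a number plus a slot letter (e.g.
// "16B" at a block boundary, "16r" at the register slot), and each basic
// block covers a half-open index range such as [0B;48B). Illustrative
// values; the exact numbering depends on the function.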

void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}

unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);

  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}

void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}
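
// The first error in a function is preceded by a dump of the function (or
// of its live intervals, when available); each report then looks roughly
// like this (illustrative values):
//
//   *** Bad machine code: Missing operands in last group ***
//   - function: foo
//   - basic block: %bb.1 if.then (0x...) [16B;64B)
//   - instruction: 32r	INLINEASM ...
//
// The exact fields come from the report() overloads around here.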

void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB) << ';'
           << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}

void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}

void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}

void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}

void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}

void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}

void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}

void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoPHIs) &&
      MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }

  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used "
             "by a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock *, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }

  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }

  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);

  // Call analyzeBranch. If it succeeds, there are several more conditions
  // to check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block.
    // Let's check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!",
               MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!",
               MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!",
               MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!",
               MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!",
               MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!",
               MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with "
               "a barrier instruction!",
               MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!",
               MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!",
               MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }

    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't "
             "a CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }

    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }

  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}

// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}
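
// For orientation, a bundle in MIR looks roughly like this (illustrative
// opcodes):
//
//   BUNDLE implicit-def $r0 {
//     $r0 = SOME_OP ...   ; isBundledWithPred() and isBundledWithSucc()
//     OTHER_OP ...        ; last one: isBundledWithSucc() is false
//   }
//
// The checks in verify() above enforce exactly this pairing of the
// BundledPred and BundledSucc flags between neighboring instructions.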

// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16, and
  // Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check that the successor and predecessor lists look ok; assume they
      // are not. Find the indirect target without going through the
      // successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}
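
// The operand layout walked above, as a sketch:
//   op 0:  asm string (external symbol)
//   op 1:  extra flags (immediate; the bit values listed above)
//   then:  operand groups, each led by a flag immediate that encodes how
//          many register operands follow it
//   then:  one optional metadata node
//   then:  implicit register operands only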

bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}

/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message,
    // let's avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getElementCount() != Ty1.getElementCount()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
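
// For example (illustrative MIR), this helper would reject
//   %d:_(<2 x s16>) = G_TRUNC %s:_(<4 x s32>)
// because the element counts differ, while
//   %d:_(<4 x s16>) = G_TRUNC %s:_(<4 x s32>)
// passes the element-count check.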

bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}

bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
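
// Taken together, the two helpers above require each generic intrinsic
// opcode to match the declared attributes of the intrinsic it calls:
//
//   G_INTRINSIC                            no memory access, not convergent
//   G_INTRINSIC_W_SIDE_EFFECTS             memory access,    not convergent
//   G_INTRINSIC_CONVERGENT                 no memory access, convergent
//   G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS  memory access,    convergent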

void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
  if (isFunctionSelected)
    report("Unexpected generic instruction in a Selected function", MI);

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MI->getNumOperands();

  // Branches must reference a basic block if they are not indirect.
  if (MI->isBranch() && !MI->isIndirectBranch()) {
    bool HasMBB = false;
    for (const MachineOperand &Op : MI->operands()) {
      if (Op.isMBB()) {
        HasMBB = true;
        break;
      }
    }

    if (!HasMBB) {
      report("Branch instruction is missing a basic block operand or "
             "isIndirectBranch property",
             MI);
    }
  }

  // Check types.
  SmallVector<LLT, 4> Types;
  for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps); I != E;
       ++I) {
    if (!MCID.operands()[I].isGenericType())
      continue;
    // Generic instructions specify type equality constraints between some of
    // their operands. Make sure these are consistent.
    size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex();
    Types.resize(std::max(TypeIdx + 1, Types.size()));

    const MachineOperand *MO = &MI->getOperand(I);
    if (!MO->isReg()) {
      report("generic instruction must use register operands", MI);
      continue;
    }

    LLT OpTy = MRI->getType(MO->getReg());
    // Don't report a type mismatch if there is no actual mismatch, only a
    // type missing, to reduce noise:
    if (OpTy.isValid()) {
      // Only the first valid type for a type index will be printed: don't
      // overwrite it later so it's always clear which type was expected:
      if (!Types[TypeIdx].isValid())
        Types[TypeIdx] = OpTy;
      else if (Types[TypeIdx] != OpTy)
        report("Type mismatch in generic instruction", MO, I, OpTy);
    } else {
      // Generic instructions must have types attached to their operands.
      report("Generic instruction is missing a virtual register type", MO, I);
    }
  }

  // Generic opcodes must not have physical register operands.
  for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
    const MachineOperand *MO = &MI->getOperand(I);
    if (MO->isReg() && MO->getReg().isPhysical())
      report("Generic instruction cannot have physical register", MO, I);
  }

  // Avoid out of bounds in checks below. This was already reported earlier.
  if (MI->getNumOperands() < MCID.getNumOperands())
    return;

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);

  // Verify properties of various specific instruction types.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName,
                " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);

      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/"
             "missing types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else {
        if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
          report("addrspacecast must convert different address spaces", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.isPointerOrPointerVector())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.isPointerOrPointerVector())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.isPointerOrPointerVector())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point.
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (DstTy.isPointerOrPointerVector() || SrcTy.isPointerOrPointerVector())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstTy.getScalarSizeInBits();
    unsigned SrcSize = SrcTy.getScalarSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source",
               MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SELECT: {
    LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
    LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
    if (!SelTy.isValid() || !CondTy.isValid())
      break;

    // Scalar condition select on a vector is valid.
    if (CondTy.isVector())
      verifyVectorElementMatch(SelTy, CondTy, MI);
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    // G_MERGE_VALUES should only be used to merge scalars into a larger
    // scalar, e.g. s2N = MERGE sN, sN.
    // Merging multiple scalars into a vector is not allowed; use
    // G_BUILD_VECTOR for that.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (DstTy.isVector() || SrcTy.isVector())
      report("G_MERGE_VALUES cannot operate on vectors", MI);

    const unsigned NumOps = MI->getNumOperands();
    if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
      report("G_MERGE_VALUES result size is inconsistent", MI);

    for (unsigned I = 2; I != NumOps; ++I) {
      if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
        report("G_MERGE_VALUES source types do not match", MI);
    }

    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    unsigned NumDsts = MI->getNumOperands() - 1;
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    for (unsigned i = 1; i < NumDsts; ++i) {
      if (MRI->getType(MI->getOperand(i).getReg()) != DstTy) {
        report("G_UNMERGE_VALUES destination types do not match", MI);
        break;
      }
    }

    LLT SrcTy = MRI->getType(MI->getOperand(NumDsts).getReg());
    if (DstTy.isVector()) {
      // This case is the converse of G_CONCAT_VECTORS.
      if (!SrcTy.isVector() ||
          SrcTy.getScalarType() != DstTy.getScalarType() ||
          SrcTy.isScalableVector() != DstTy.isScalableVector() ||
          SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES source operand does not match vector "
               "destination operands",
               MI);
    } else if (SrcTy.isVector()) {
      // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
      // mismatched types as long as the total size matches:
      //   %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits())
        report("G_UNMERGE_VALUES vector source operand does not match scalar "
               "destination operands",
               MI);
    } else {
      // This case is the converse of G_MERGE_VALUES.
      if (SrcTy.getSizeInBits() != NumDsts * DstTy.getSizeInBits()) {
        report("G_UNMERGE_VALUES scalar source operand does not match scalar "
               "destination operands",
               MI);
      }
    }
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // Source types must be scalars, dest type a vector. Total size of scalars
    // must match the dest vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector()) {
      report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
      break;
    }

    if (DstTy.getElementType() != SrcEltTy)
      report("G_BUILD_VECTOR result element type must match source type", MI);

    if (DstTy.getNumElements() != MI->getNumOperands() - 1)
      report("G_BUILD_VECTOR must have an operand for each element", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR source operand types are not homogeneous", MI);

    break;
  }
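  // A well-formed G_BUILD_VECTOR, for illustration (MIR):
  //   %d:_(<2 x s32>) = G_BUILD_VECTOR %a:_(s32), %b:_(s32)
  // Two s32 sources fill the two s32 lanes of the <2 x s32> result.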
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Source types must be scalars, dest type a vector. Scalar types must be
    // larger than the dest vector elt type, as this is a truncating operation.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || SrcEltTy.isVector())
      report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
             MI);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MO.getReg()))
        report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
               MI);
    if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
      report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
             "dest elt type",
             MI);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    // Source types should be vectors, and total size should match the dest
    // vector size.
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isVector() || !SrcTy.isVector())
      report("G_CONCAT_VECTORS requires vector source and destination "
             "operands",
             MI);

    if (MI->getNumOperands() < 3)
      report("G_CONCAT_VECTORS requires at least 2 source operands", MI);

    for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
      if (MRI->getType(MI->getOperand(1).getReg()) !=
          MRI->getType(MO.getReg()))
        report("G_CONCAT_VECTORS source operand types are not homogeneous",
               MI);
    if (DstTy.getElementCount() !=
        SrcTy.getElementCount() * (MI->getNumOperands() - 1))
      report("G_CONCAT_VECTORS num dest and source elements should match", MI);
    break;
  }
1483 case TargetOpcode::G_ICMP:
1484 case TargetOpcode::G_FCMP: {
1485 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1486 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1487
1488 if ((DstTy.isVector() != SrcTy.isVector()) ||
1489 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1490 report(msg: "Generic vector icmp/fcmp must preserve number of lanes", MI);
1491
1492 break;
1493 }
1494 case TargetOpcode::G_EXTRACT: {
1495 const MachineOperand &SrcOp = MI->getOperand(i: 1);
1496 if (!SrcOp.isReg()) {
1497 report(msg: "extract source must be a register", MI);
1498 break;
1499 }
1500
1501 const MachineOperand &OffsetOp = MI->getOperand(i: 2);
1502 if (!OffsetOp.isImm()) {
1503 report(msg: "extract offset must be a constant", MI);
1504 break;
1505 }
1506
1507 unsigned DstSize = MRI->getType(Reg: MI->getOperand(i: 0).getReg()).getSizeInBits();
1508 unsigned SrcSize = MRI->getType(Reg: SrcOp.getReg()).getSizeInBits();
1509 if (SrcSize == DstSize)
1510 report(msg: "extract source must be larger than result", MI);
1511
1512 if (DstSize + OffsetOp.getImm() > SrcSize)
1513 report(msg: "extract reads past end of register", MI);
1514 break;
1515 }
1516 case TargetOpcode::G_INSERT: {
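    // Illustrative MIR sketch of the expected form (hypothetical vregs):
    //   %2:_(s64) = G_INSERT %0:_(s64), %1:_(s32), 0   ; writes bits [0, 32)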
1517 const MachineOperand &SrcOp = MI->getOperand(i: 2);
1518 if (!SrcOp.isReg()) {
1519 report(msg: "insert source must be a register", MI);
1520 break;
1521 }
1522
1523 const MachineOperand &OffsetOp = MI->getOperand(i: 3);
1524 if (!OffsetOp.isImm()) {
1525 report(msg: "insert offset must be a constant", MI);
1526 break;
1527 }
1528
1529 unsigned DstSize = MRI->getType(Reg: MI->getOperand(i: 0).getReg()).getSizeInBits();
1530 unsigned SrcSize = MRI->getType(Reg: SrcOp.getReg()).getSizeInBits();
1531
1532 if (DstSize <= SrcSize)
1533 report(msg: "inserted size must be smaller than total register", MI);
1534
1535 if (SrcSize + OffsetOp.getImm() > DstSize)
1536 report(msg: "insert writes past end of register", MI);
1537
1538 break;
1539 }
1540 case TargetOpcode::G_JUMP_TABLE: {
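    // Illustrative MIR sketch of the expected form (hypothetical operands):
    //   %0:_(p0) = G_JUMP_TABLE %jump-table.0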
1541 if (!MI->getOperand(i: 1).isJTI())
1542 report(msg: "G_JUMP_TABLE source operand must be a jump table index", MI);
1543 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1544 if (!DstTy.isPointer())
1545 report(msg: "G_JUMP_TABLE dest operand must have a pointer type", MI);
1546 break;
1547 }
1548 case TargetOpcode::G_BRJT: {
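    // Illustrative MIR sketch of the expected form (hypothetical operands):
    //   G_BRJT %0:_(p0), %jump-table.0, %1:_(s64)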
1549 if (!MRI->getType(Reg: MI->getOperand(i: 0).getReg()).isPointer())
1550 report(msg: "G_BRJT src operand 0 must be a pointer type", MI);
1551
1552 if (!MI->getOperand(i: 1).isJTI())
1553 report(msg: "G_BRJT src operand 1 must be a jump table index", MI);
1554
1555 const auto &IdxOp = MI->getOperand(i: 2);
1556 if (!IdxOp.isReg() || MRI->getType(Reg: IdxOp.getReg()).isPointer())
1557 report(msg: "G_BRJT src operand 2 must be a scalar reg type", MI);
1558 break;
1559 }
1560 case TargetOpcode::G_INTRINSIC:
1561 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1562 case TargetOpcode::G_INTRINSIC_CONVERGENT:
1563 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS: {
1564 // TODO: Should verify number of def and use operands, but the current
1565 // interface requires passing in IR types for mangling.
1566 const MachineOperand &IntrIDOp = MI->getOperand(i: MI->getNumExplicitDefs());
1567 if (!IntrIDOp.isIntrinsicID()) {
1568 report(msg: "G_INTRINSIC first src operand must be an intrinsic ID", MI);
1569 break;
1570 }
1571
1572 if (!verifyGIntrinsicSideEffects(MI))
1573 break;
1574 if (!verifyGIntrinsicConvergence(MI))
1575 break;
1576
1577 break;
1578 }
1579 case TargetOpcode::G_SEXT_INREG: {
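    // Illustrative MIR sketch (hypothetical vregs); the size immediate must lie
    // in [1, source scalar width), here [1, 31]:
    //   %1:_(s32) = G_SEXT_INREG %0:_(s32), 8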
1580 if (!MI->getOperand(i: 2).isImm()) {
1581 report(msg: "G_SEXT_INREG expects an immediate operand #2", MI);
1582 break;
1583 }
1584
1585 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1586 int64_t Imm = MI->getOperand(i: 2).getImm();
1587 if (Imm <= 0)
1588 report(msg: "G_SEXT_INREG size must be >= 1", MI);
1589 if (Imm >= SrcTy.getScalarSizeInBits())
1590 report(msg: "G_SEXT_INREG size must be less than source bit width", MI);
1591 break;
1592 }
1593 case TargetOpcode::G_BSWAP: {
1594 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1595 if (DstTy.getScalarSizeInBits() % 16 != 0)
1596 report(msg: "G_BSWAP size must be a multiple of 16 bits", MI);
1597 break;
1598 }
1599 case TargetOpcode::G_SHUFFLE_VECTOR: {
1600 const MachineOperand &MaskOp = MI->getOperand(i: 3);
1601 if (!MaskOp.isShuffleMask()) {
1602 report(msg: "Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1603 break;
1604 }
1605
1606 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1607 LLT Src0Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1608 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1609
1610 if (Src0Ty != Src1Ty)
1611 report(msg: "Source operands must be the same type", MI);
1612
1613 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1614 report(msg: "G_SHUFFLE_VECTOR cannot change element type", MI);
1615
1616 // Don't check that all operands are vector because scalars are used in
1617 // place of 1 element vectors.
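    // Illustrative MIR sketch (hypothetical vregs); mask semantics follow IR
    // shufflevector: indices in [0, SrcNumElts) select from the first source,
    // indices in [SrcNumElts, 2*SrcNumElts) from the second, and -1 marks an
    // undef lane:
    //   %3:_(<2 x s32>) = G_SHUFFLE_VECTOR %0:_(<2 x s32>), %1:_(<2 x s32>), shufflemask(0, 3)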
1618 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1619 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1620
1621 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1622
1623 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1624 report(msg: "Wrong result type for shufflemask", MI);
1625
1626 for (int Idx : MaskIdxes) {
1627 if (Idx < 0)
1628 continue;
1629
1630 if (Idx >= 2 * SrcNumElts)
1631 report(msg: "Out of bounds shuffle index", MI);
1632 }
1633
1634 break;
1635 }
1636 case TargetOpcode::G_DYN_STACKALLOC: {
1637 const MachineOperand &DstOp = MI->getOperand(i: 0);
1638 const MachineOperand &AllocOp = MI->getOperand(i: 1);
1639 const MachineOperand &AlignOp = MI->getOperand(i: 2);
1640
1641 if (!DstOp.isReg() || !MRI->getType(Reg: DstOp.getReg()).isPointer()) {
1642 report(msg: "dst operand 0 must be a pointer type", MI);
1643 break;
1644 }
1645
1646 if (!AllocOp.isReg() || !MRI->getType(Reg: AllocOp.getReg()).isScalar()) {
1647 report(msg: "src operand 1 must be a scalar reg type", MI);
1648 break;
1649 }
1650
1651 if (!AlignOp.isImm()) {
1652 report(msg: "src operand 2 must be an immediate type", MI);
1653 break;
1654 }
1655 break;
1656 }
1657 case TargetOpcode::G_MEMCPY_INLINE:
1658 case TargetOpcode::G_MEMCPY:
1659 case TargetOpcode::G_MEMMOVE: {
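    // Illustrative MIR sketch (hypothetical vregs, memory operands elided); the
    // destination carries a store MMO, the source a load MMO, and the trailing
    // immediate is the 'tail' flag, which is not checked for G_MEMCPY_INLINE:
    //   G_MEMCPY %0:_(p0), %1:_(p0), %2:_(s64), 0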
1660 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1661 if (MMOs.size() != 2) {
1662 report(msg: "memcpy/memmove must have 2 memory operands", MI);
1663 break;
1664 }
1665
1666 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1667 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1668 report(msg: "wrong memory operand types", MI);
1669 break;
1670 }
1671
1672 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1673 report(msg: "inconsistent memory operand sizes", MI);
1674
1675 LLT DstPtrTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1676 LLT SrcPtrTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1677
1678 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1679 report(msg: "memory instruction operand must be a pointer", MI);
1680 break;
1681 }
1682
1683 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1684 report(msg: "inconsistent store address space", MI);
1685 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1686 report(msg: "inconsistent load address space", MI);
1687
1688 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1689 if (!MI->getOperand(i: 3).isImm() || (MI->getOperand(i: 3).getImm() & ~1LL))
1690 report(msg: "'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1691
1692 break;
1693 }
1694 case TargetOpcode::G_BZERO:
1695 case TargetOpcode::G_MEMSET: {
1696 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1697 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1698 if (MMOs.size() != 1) {
1699 report(Msg: Twine(Name, " must have 1 memory operand"), MI);
1700 break;
1701 }
1702
1703 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1704 report(Msg: Twine(Name, " memory operand must be a store"), MI);
1705 break;
1706 }
1707
1708 LLT DstPtrTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1709 if (!DstPtrTy.isPointer()) {
1710 report(Msg: Twine(Name, " operand must be a pointer"), MI);
1711 break;
1712 }
1713
1714 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1715 report(Msg: "inconsistent " + Twine(Name, " address space"), MI);
1716
1717 if (!MI->getOperand(i: MI->getNumOperands() - 1).isImm() ||
1718 (MI->getOperand(i: MI->getNumOperands() - 1).getImm() & ~1LL))
1719 report(msg: "'tail' flag (last operand) must be an immediate 0 or 1", MI);
1720
1721 break;
1722 }
1723 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1724 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1725 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1726 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1727 LLT Src2Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1728 if (!DstTy.isScalar())
1729 report(msg: "Vector reduction requires a scalar destination type", MI);
1730 if (!Src1Ty.isScalar())
1731 report(msg: "Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1732 if (!Src2Ty.isVector())
1733 report(msg: "Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1734 break;
1735 }
1736 case TargetOpcode::G_VECREDUCE_FADD:
1737 case TargetOpcode::G_VECREDUCE_FMUL:
1738 case TargetOpcode::G_VECREDUCE_FMAX:
1739 case TargetOpcode::G_VECREDUCE_FMIN:
1740 case TargetOpcode::G_VECREDUCE_FMAXIMUM:
1741 case TargetOpcode::G_VECREDUCE_FMINIMUM:
1742 case TargetOpcode::G_VECREDUCE_ADD:
1743 case TargetOpcode::G_VECREDUCE_MUL:
1744 case TargetOpcode::G_VECREDUCE_AND:
1745 case TargetOpcode::G_VECREDUCE_OR:
1746 case TargetOpcode::G_VECREDUCE_XOR:
1747 case TargetOpcode::G_VECREDUCE_SMAX:
1748 case TargetOpcode::G_VECREDUCE_SMIN:
1749 case TargetOpcode::G_VECREDUCE_UMAX:
1750 case TargetOpcode::G_VECREDUCE_UMIN: {
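    // Illustrative MIR sketch (hypothetical vregs):
    //   %1:_(s32) = G_VECREDUCE_ADD %0:_(<4 x s32>)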
1751 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1752 if (!DstTy.isScalar())
1753 report(msg: "Vector reduction requires a scalar destination type", MI);
1754 break;
1755 }
1756
1757 case TargetOpcode::G_SBFX:
1758 case TargetOpcode::G_UBFX: {
1759 LLT DstTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1760 if (DstTy.isVector()) {
1761 report(msg: "Bitfield extraction is not supported on vectors", MI);
1762 break;
1763 }
1764 break;
1765 }
1766 case TargetOpcode::G_SHL:
1767 case TargetOpcode::G_LSHR:
1768 case TargetOpcode::G_ASHR:
1769 case TargetOpcode::G_ROTR:
1770 case TargetOpcode::G_ROTL: {
1771 LLT Src1Ty = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1772 LLT Src2Ty = MRI->getType(Reg: MI->getOperand(i: 2).getReg());
1773 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1774 report(msg: "Shifts and rotates require operands to be either all scalars or "
1775 "all vectors",
1776 MI);
1777 break;
1778 }
1779 break;
1780 }
1781 case TargetOpcode::G_LLROUND:
1782 case TargetOpcode::G_LROUND: {
1783 verifyAllRegOpsScalar(MI: *MI, MRI: *MRI);
1784 break;
1785 }
1786 case TargetOpcode::G_IS_FPCLASS: {
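    // Illustrative MIR sketch (hypothetical vregs; the immediate is an
    // FPClassTest mask, 3 corresponding to fcNan = fcSNan | fcQNan under the
    // current encoding):
    //   %1:_(s1) = G_IS_FPCLASS %0:_(s32), 3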
1787 LLT DestTy = MRI->getType(Reg: MI->getOperand(i: 0).getReg());
1788 LLT DestEltTy = DestTy.getScalarType();
1789 if (!DestEltTy.isScalar()) {
1790 report(msg: "Destination must be a scalar or vector of scalars", MI);
1791 break;
1792 }
1793 LLT SrcTy = MRI->getType(Reg: MI->getOperand(i: 1).getReg());
1794 LLT SrcEltTy = SrcTy.getScalarType();
1795 if (!SrcEltTy.isScalar()) {
1796 report(msg: "Source must be a scalar or vector of scalars", MI);
1797 break;
1798 }
1799 if (!verifyVectorElementMatch(Ty0: DestTy, Ty1: SrcTy, MI))
1800 break;
1801 const MachineOperand &TestMO = MI->getOperand(i: 2);
1802 if (!TestMO.isImm()) {
1803 report(msg: "floating-point class set (operand 2) must be an immediate", MI);
1804 break;
1805 }
1806 int64_t Test = TestMO.getImm();
1807 if (Test < 0 || Test > fcAllFlags) {
1808 report(msg: "Incorrect floating-point class set (operand 2)", MI);
1809 break;
1810 }
1811 break;
1812 }
1813 case TargetOpcode::G_PREFETCH: {
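    // Illustrative MIR sketch mirroring llvm.prefetch (hypothetical vreg):
    //   G_PREFETCH %0:_(p0), 1, 3, 1   ; write, max locality, data cache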
1814 const MachineOperand &AddrOp = MI->getOperand(i: 0);
1815 if (!AddrOp.isReg() || !MRI->getType(Reg: AddrOp.getReg()).isPointer()) {
1816 report(msg: "addr operand must be a pointer", MO: &AddrOp, MONum: 0);
1817 break;
1818 }
1819 const MachineOperand &RWOp = MI->getOperand(i: 1);
1820 if (!RWOp.isImm() || (uint64_t)RWOp.getImm() >= 2) {
1821 report(msg: "rw operand must be an immediate 0-1", MO: &RWOp, MONum: 1);
1822 break;
1823 }
1824 const MachineOperand &LocalityOp = MI->getOperand(i: 2);
1825 if (!LocalityOp.isImm() || (uint64_t)LocalityOp.getImm() >= 4) {
1826 report(msg: "locality operand must be an immediate 0-3", MO: &LocalityOp, MONum: 2);
1827 break;
1828 }
1829 const MachineOperand &CacheTypeOp = MI->getOperand(i: 3);
1830 if (!CacheTypeOp.isImm() || (uint64_t)CacheTypeOp.getImm() >= 2) {
1831 report(msg: "cache type operand must be an immediate 0-1", MO: &CacheTypeOp, MONum: 3);
1832 break;
1833 }
1834 break;
1835 }
1836 case TargetOpcode::G_ASSERT_ALIGN: {
1837 if (MI->getOperand(i: 2).getImm() < 1)
1838 report(msg: "alignment immediate must be >= 1", MI);
1839 break;
1840 }
1841 case TargetOpcode::G_CONSTANT_POOL: {
1842 if (!MI->getOperand(i: 1).isCPI())
1843 report(msg: "Src operand 1 must be a constant pool index", MI);
1844 if (!MRI->getType(Reg: MI->getOperand(i: 0).getReg()).isPointer())
1845 report(msg: "Dst operand 0 must be a pointer", MI);
1846 break;
1847 }
1848 default:
1849 break;
1850 }
1851}
1852
1853void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1854 const MCInstrDesc &MCID = MI->getDesc();
1855 if (MI->getNumOperands() < MCID.getNumOperands()) {
1856 report(msg: "Too few operands", MI);
1857 errs() << MCID.getNumOperands() << " operands expected, but "
1858 << MI->getNumOperands() << " given.\n";
1859 }
1860
1861 if (MI->getFlag(Flag: MachineInstr::NoConvergent) && !MCID.isConvergent())
1862 report(msg: "NoConvergent flag expected only on convergent instructions.", MI);
1863
1864 if (MI->isPHI()) {
1865 if (MF->getProperties().hasProperty(
1866 P: MachineFunctionProperties::Property::NoPHIs))
1867 report(msg: "Found PHI instruction with NoPHIs property set", MI);
1868
1869 if (FirstNonPHI)
1870 report(msg: "Found PHI instruction after non-PHI", MI);
1871 } else if (FirstNonPHI == nullptr)
1872 FirstNonPHI = MI;
1873
1874 // Check the tied operands.
1875 if (MI->isInlineAsm())
1876 verifyInlineAsm(MI);
1877
1878 // Check that unspillable terminators define a reg and have at most one use.
1879 if (TII->isUnspillableTerminator(MI)) {
1880 if (!MI->getOperand(i: 0).isReg() || !MI->getOperand(i: 0).isDef())
1881 report(msg: "Unspillable Terminator does not define a reg", MI);
1882 Register Def = MI->getOperand(i: 0).getReg();
1883 if (Def.isVirtual() &&
1884 !MF->getProperties().hasProperty(
1885 P: MachineFunctionProperties::Property::NoPHIs) &&
1886 std::distance(first: MRI->use_nodbg_begin(RegNo: Def), last: MRI->use_nodbg_end()) > 1)
1887 report(msg: "Unspillable Terminator expected to have at most one use!", MI);
1888 }
1889
1890 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1891 // DBG_VALUEs: these are convenient to use in tests, but should never get
1892 // generated.
1893 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1894 if (!MI->getDebugLoc())
1895 report(msg: "Missing DebugLoc for debug instruction", MI);
1896
1897  // Meta instructions should never be the subject of debug value tracking;
1898  // they don't create a value in the output program at all.
1899 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1900 report(msg: "Metadata instruction should not have a value tracking number", MI);
1901
1902 // Check the MachineMemOperands for basic consistency.
1903 for (MachineMemOperand *Op : MI->memoperands()) {
1904 if (Op->isLoad() && !MI->mayLoad())
1905 report(msg: "Missing mayLoad flag", MI);
1906 if (Op->isStore() && !MI->mayStore())
1907 report(msg: "Missing mayStore flag", MI);
1908 }
1909
1910 // Debug values must not have a slot index.
1911 // Other instructions must have one, unless they are inside a bundle.
1912 if (LiveInts) {
1913 bool mapped = !LiveInts->isNotInMIMap(Instr: *MI);
1914 if (MI->isDebugOrPseudoInstr()) {
1915 if (mapped)
1916 report(msg: "Debug instruction has a slot index", MI);
1917 } else if (MI->isInsideBundle()) {
1918 if (mapped)
1919 report(msg: "Instruction inside bundle has a slot index", MI);
1920 } else {
1921 if (!mapped)
1922 report(msg: "Missing slot index", MI);
1923 }
1924 }
1925
1926 unsigned Opc = MCID.getOpcode();
1927 if (isPreISelGenericOpcode(Opcode: Opc) || isPreISelGenericOptimizationHint(Opcode: Opc)) {
1928 verifyPreISelGenericInstruction(MI);
1929 return;
1930 }
1931
1932 StringRef ErrorInfo;
1933 if (!TII->verifyInstruction(MI: *MI, ErrInfo&: ErrorInfo))
1934 report(msg: ErrorInfo.data(), MI);
1935
1936 // Verify properties of various specific instruction types
1937 switch (MI->getOpcode()) {
1938 case TargetOpcode::COPY: {
1939 const MachineOperand &DstOp = MI->getOperand(i: 0);
1940 const MachineOperand &SrcOp = MI->getOperand(i: 1);
1941 const Register SrcReg = SrcOp.getReg();
1942 const Register DstReg = DstOp.getReg();
1943
1944 LLT DstTy = MRI->getType(Reg: DstReg);
1945 LLT SrcTy = MRI->getType(Reg: SrcReg);
1946 if (SrcTy.isValid() && DstTy.isValid()) {
1947 // If both types are valid, check that the types are the same.
1948 if (SrcTy != DstTy) {
1949 report(msg: "Copy Instruction is illegal with mismatching types", MI);
1950 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1951 }
1952
1953 break;
1954 }
1955
1956 if (!SrcTy.isValid() && !DstTy.isValid())
1957 break;
1958
1959 // If we have only one valid type, this is likely a copy between a virtual
1960 // and physical register.
1961 TypeSize SrcSize = TRI->getRegSizeInBits(Reg: SrcReg, MRI: *MRI);
1962 TypeSize DstSize = TRI->getRegSizeInBits(Reg: DstReg, MRI: *MRI);
1963 if (SrcReg.isPhysical() && DstTy.isValid()) {
1964 const TargetRegisterClass *SrcRC =
1965 TRI->getMinimalPhysRegClassLLT(Reg: SrcReg, Ty: DstTy);
1966 if (SrcRC)
1967 SrcSize = TRI->getRegSizeInBits(RC: *SrcRC);
1968 }
1969
1970 if (DstReg.isPhysical() && SrcTy.isValid()) {
1971 const TargetRegisterClass *DstRC =
1972 TRI->getMinimalPhysRegClassLLT(Reg: DstReg, Ty: SrcTy);
1973 if (DstRC)
1974 DstSize = TRI->getRegSizeInBits(RC: *DstRC);
1975 }
1976
1977    // The next two checks allow COPY between physical and virtual registers
1978    // when the virtual register has a scalable size and the physical register
1979    // has a fixed size. These checks allow COPY between *potentially* mismatched
1980 // sizes. However, once RegisterBankSelection occurs, MachineVerifier should
1981 // be able to resolve a fixed size for the scalable vector, and at that
1982 // point this function will know for sure whether the sizes are mismatched
1983 // and correctly report a size mismatch.
1984 if (SrcReg.isPhysical() && DstReg.isVirtual() && DstSize.isScalable() &&
1985 !SrcSize.isScalable())
1986 break;
1987 if (SrcReg.isVirtual() && DstReg.isPhysical() && SrcSize.isScalable() &&
1988 !DstSize.isScalable())
1989 break;
1990
1991 if (SrcSize.isNonZero() && DstSize.isNonZero() && SrcSize != DstSize) {
1992 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1993 report(msg: "Copy Instruction is illegal with mismatching sizes", MI);
1994 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1995 << "\n";
1996 }
1997 }
1998 break;
1999 }
2000 case TargetOpcode::STATEPOINT: {
2001 StatepointOpers SO(MI);
2002 if (!MI->getOperand(i: SO.getIDPos()).isImm() ||
2003 !MI->getOperand(i: SO.getNBytesPos()).isImm() ||
2004 !MI->getOperand(i: SO.getNCallArgsPos()).isImm()) {
2005 report(msg: "meta operands to STATEPOINT not constant!", MI);
2006 break;
2007 }
2008
2009 auto VerifyStackMapConstant = [&](unsigned Offset) {
2010 if (Offset >= MI->getNumOperands()) {
2011 report(msg: "stack map constant to STATEPOINT is out of range!", MI);
2012 return;
2013 }
2014 if (!MI->getOperand(i: Offset - 1).isImm() ||
2015 MI->getOperand(i: Offset - 1).getImm() != StackMaps::ConstantOp ||
2016 !MI->getOperand(i: Offset).isImm())
2017 report(msg: "stack map constant to STATEPOINT not well formed!", MI);
2018 };
2019 VerifyStackMapConstant(SO.getCCIdx());
2020 VerifyStackMapConstant(SO.getFlagsIdx());
2021 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
2022 VerifyStackMapConstant(SO.getNumGCPtrIdx());
2023 VerifyStackMapConstant(SO.getNumAllocaIdx());
2024 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
2025
2026 // Verify that all explicit statepoint defs are tied to gc operands as
2027 // they are expected to be a relocation of gc operands.
2028 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
2029 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
2030 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
2031 unsigned UseOpIdx;
2032 if (!MI->isRegTiedToUseOperand(DefOpIdx: Idx, UseOpIdx: &UseOpIdx)) {
2033 report(msg: "STATEPOINT defs expected to be tied", MI);
2034 break;
2035 }
2036 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
2037 report(msg: "STATEPOINT def tied to non-gc operand", MI);
2038 break;
2039 }
2040 }
2041
2042 // TODO: verify we have properly encoded deopt arguments
2043 } break;
2044 case TargetOpcode::INSERT_SUBREG: {
2045 unsigned InsertedSize;
2046 if (unsigned SubIdx = MI->getOperand(i: 2).getSubReg())
2047 InsertedSize = TRI->getSubRegIdxSize(Idx: SubIdx);
2048 else
2049 InsertedSize = TRI->getRegSizeInBits(Reg: MI->getOperand(i: 2).getReg(), MRI: *MRI);
2050 unsigned SubRegSize = TRI->getSubRegIdxSize(Idx: MI->getOperand(i: 3).getImm());
2051 if (SubRegSize < InsertedSize) {
2052 report(msg: "INSERT_SUBREG expected inserted value to have equal or lesser "
2053 "size than the subreg it was inserted into", MI);
2054 break;
2055 }
2056 } break;
2057 case TargetOpcode::REG_SEQUENCE: {
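    // Illustrative MIR sketch (register class and subreg index names are
    // target-specific and hypothetical here):
    //   %2:dregpair = REG_SEQUENCE %0:dreg, %subreg.dsub0, %1:dreg, %subreg.dsub1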
2058 unsigned NumOps = MI->getNumOperands();
2059 if (!(NumOps & 1)) {
2060 report(msg: "Invalid number of operands for REG_SEQUENCE", MI);
2061 break;
2062 }
2063
2064 for (unsigned I = 1; I != NumOps; I += 2) {
2065 const MachineOperand &RegOp = MI->getOperand(i: I);
2066 const MachineOperand &SubRegOp = MI->getOperand(i: I + 1);
2067
2068 if (!RegOp.isReg())
2069 report(msg: "Invalid register operand for REG_SEQUENCE", MO: &RegOp, MONum: I);
2070
2071 if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
2072 SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
2073 report(msg: "Invalid subregister index operand for REG_SEQUENCE",
2074 MO: &SubRegOp, MONum: I + 1);
2075 }
2076 }
2077
2078 Register DstReg = MI->getOperand(i: 0).getReg();
2079 if (DstReg.isPhysical())
2080 report(msg: "REG_SEQUENCE does not support physical register results", MI);
2081
2082 if (MI->getOperand(i: 0).getSubReg())
2083 report(msg: "Invalid subreg result for REG_SEQUENCE", MI);
2084
2085 break;
2086 }
2087 }
2088}
2089
2090void
2091MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
2092 const MachineInstr *MI = MO->getParent();
2093 const MCInstrDesc &MCID = MI->getDesc();
2094 unsigned NumDefs = MCID.getNumDefs();
2095 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
2096 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
2097
2098 // The first MCID.NumDefs operands must be explicit register defines
2099 if (MONum < NumDefs) {
2100 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2101 if (!MO->isReg())
2102 report(msg: "Explicit definition must be a register", MO, MONum);
2103 else if (!MO->isDef() && !MCOI.isOptionalDef())
2104 report(msg: "Explicit definition marked as use", MO, MONum);
2105 else if (MO->isImplicit())
2106 report(msg: "Explicit definition marked as implicit", MO, MONum);
2107 } else if (MONum < MCID.getNumOperands()) {
2108 const MCOperandInfo &MCOI = MCID.operands()[MONum];
2109 // Don't check if it's the last operand in a variadic instruction. See,
2110 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
2111 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
2112 if (!IsOptional) {
2113 if (MO->isReg()) {
2114 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
2115 report(msg: "Explicit operand marked as def", MO, MONum);
2116 if (MO->isImplicit())
2117 report(msg: "Explicit operand marked as implicit", MO, MONum);
2118 }
2119
2120 // Check that an instruction has register operands only as expected.
2121 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
2122 !MO->isReg() && !MO->isFI())
2123 report(msg: "Expected a register operand.", MO, MONum);
2124 if (MO->isReg()) {
2125 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
2126 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
2127 !TII->isPCRelRegisterOperandLegal(MO: *MO)))
2128 report(msg: "Expected a non-register operand.", MO, MONum);
2129 }
2130 }
2131
2132 int TiedTo = MCID.getOperandConstraint(OpNum: MONum, Constraint: MCOI::TIED_TO);
2133 if (TiedTo != -1) {
2134 if (!MO->isReg())
2135 report(msg: "Tied use must be a register", MO, MONum);
2136 else if (!MO->isTied())
2137 report(msg: "Operand should be tied", MO, MONum);
2138 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(OpIdx: MONum))
2139 report(msg: "Tied def doesn't match MCInstrDesc", MO, MONum);
2140 else if (MO->getReg().isPhysical()) {
2141 const MachineOperand &MOTied = MI->getOperand(i: TiedTo);
2142 if (!MOTied.isReg())
2143 report(msg: "Tied counterpart must be a register", MO: &MOTied, MONum: TiedTo);
2144 else if (MOTied.getReg().isPhysical() &&
2145 MO->getReg() != MOTied.getReg())
2146 report(msg: "Tied physical registers must match.", MO: &MOTied, MONum: TiedTo);
2147 }
2148 } else if (MO->isReg() && MO->isTied())
2149 report(msg: "Explicit operand should not be tied", MO, MONum);
2150 } else if (!MI->isVariadic()) {
2151 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
2152 if (!MO->isValidExcessOperand())
2153 report(msg: "Extra explicit operand on non-variadic instruction", MO, MONum);
2154 }
2155
2156 switch (MO->getType()) {
2157 case MachineOperand::MO_Register: {
2158 // Verify debug flag on debug instructions. Check this first because reg0
2159 // indicates an undefined debug value.
2160 if (MI->isDebugInstr() && MO->isUse()) {
2161 if (!MO->isDebug())
2162 report(msg: "Register operand must be marked debug", MO, MONum);
2163 } else if (MO->isDebug()) {
2164 report(msg: "Register operand must not be marked debug", MO, MONum);
2165 }
2166
2167 const Register Reg = MO->getReg();
2168 if (!Reg)
2169 return;
2170 if (MRI->tracksLiveness() && !MI->isDebugInstr())
2171 checkLiveness(MO, MONum);
2172
2173 if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
2174 MO->getReg().isVirtual()) // TODO: Apply to physregs too
2175 report(msg: "Undef virtual register def operands require a subregister", MO, MONum);
2176
2177 // Verify the consistency of tied operands.
2178 if (MO->isTied()) {
2179 unsigned OtherIdx = MI->findTiedOperandIdx(OpIdx: MONum);
2180 const MachineOperand &OtherMO = MI->getOperand(i: OtherIdx);
2181 if (!OtherMO.isReg())
2182 report(msg: "Must be tied to a register", MO, MONum);
2183 if (!OtherMO.isTied())
2184 report(msg: "Missing tie flags on tied operand", MO, MONum);
2185 if (MI->findTiedOperandIdx(OpIdx: OtherIdx) != MONum)
2186 report(msg: "Inconsistent tie links", MO, MONum);
2187 if (MONum < MCID.getNumDefs()) {
2188 if (OtherIdx < MCID.getNumOperands()) {
2189 if (-1 == MCID.getOperandConstraint(OpNum: OtherIdx, Constraint: MCOI::TIED_TO))
2190 report(msg: "Explicit def tied to explicit use without tie constraint",
2191 MO, MONum);
2192 } else {
2193 if (!OtherMO.isImplicit())
2194 report(msg: "Explicit def should be tied to implicit use", MO, MONum);
2195 }
2196 }
2197 }
2198
2199 // Verify two-address constraints after the twoaddressinstruction pass.
2200    // Both the twoaddressinstruction pass and the phi-node-elimination pass
2201    // call MRI->leaveSSA() to clear IsSSA, but this verification belongs after
2202    // the twoaddressinstruction pass, not after phi-node-elimination. So we
2203    // shouldn't use IsSSA as the condition; instead we verify the two-address
2204    // constraints based on the TiedOpsRewritten property, which is set by the
2205    // twoaddressinstruction pass.
2206 unsigned DefIdx;
2207 if (MF->getProperties().hasProperty(
2208 P: MachineFunctionProperties::Property::TiedOpsRewritten) &&
2209 MO->isUse() && MI->isRegTiedToDefOperand(UseOpIdx: MONum, DefOpIdx: &DefIdx) &&
2210 Reg != MI->getOperand(i: DefIdx).getReg())
2211 report(msg: "Two-address instruction operands must be identical", MO, MONum);
2212
2213 // Check register classes.
2214 unsigned SubIdx = MO->getSubReg();
2215
2216 if (Reg.isPhysical()) {
2217 if (SubIdx) {
2218 report(msg: "Illegal subregister index for physical register", MO, MONum);
2219 return;
2220 }
2221 if (MONum < MCID.getNumOperands()) {
2222 if (const TargetRegisterClass *DRC =
2223 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2224 if (!DRC->contains(Reg)) {
2225 report(msg: "Illegal physical register for instruction", MO, MONum);
2226 errs() << printReg(Reg, TRI) << " is not a "
2227 << TRI->getRegClassName(Class: DRC) << " register.\n";
2228 }
2229 }
2230 }
2231 if (MO->isRenamable()) {
2232 if (MRI->isReserved(PhysReg: Reg)) {
2233 report(msg: "isRenamable set on reserved register", MO, MONum);
2234 return;
2235 }
2236 }
2237 } else {
2238 // Virtual register.
2239 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
2240 if (!RC) {
2241 // This is a generic virtual register.
2242
2243 // Do not allow undef uses for generic virtual registers. This ensures
2244 // getVRegDef can never fail and return null on a generic register.
2245 //
2246 // FIXME: This restriction should probably be broadened to all SSA
2247 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
2248 // run on the SSA function just before phi elimination.
2249 if (MO->isUndef())
2250 report(msg: "Generic virtual register use cannot be undef", MO, MONum);
2251
2252 // Debug value instruction is permitted to use undefined vregs.
2253 // This is a performance measure to skip the overhead of immediately
2254 // pruning unused debug operands. The final undef substitution occurs
2255 // when debug values are allocated in LDVImpl::handleDebugValue, so
2256 // these verifications always apply after this pass.
2257 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
2258 !MI->isDebugValue() || !MRI->def_empty(RegNo: Reg)) {
2259 // If we're post-Select, we can't have gvregs anymore.
2260 if (isFunctionSelected) {
2261 report(msg: "Generic virtual register invalid in a Selected function",
2262 MO, MONum);
2263 return;
2264 }
2265
2266 // The gvreg must have a type and it must not have a SubIdx.
2267 LLT Ty = MRI->getType(Reg);
2268 if (!Ty.isValid()) {
2269 report(msg: "Generic virtual register must have a valid type", MO,
2270 MONum);
2271 return;
2272 }
2273
2274 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2275 const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();
2276
2277 // If we're post-RegBankSelect, the gvreg must have a bank.
2278 if (!RegBank && isFunctionRegBankSelected) {
2279 report(msg: "Generic virtual register must have a bank in a "
2280 "RegBankSelected function",
2281 MO, MONum);
2282 return;
2283 }
2284
2285 // Make sure the register fits into its register bank if any.
2286 if (RegBank && Ty.isValid() && !Ty.isScalableVector() &&
2287 RBI->getMaximumSize(RegBankID: RegBank->getID()) < Ty.getSizeInBits()) {
2288 report(msg: "Register bank is too small for virtual register", MO,
2289 MONum);
2290 errs() << "Register bank " << RegBank->getName() << " too small("
2291 << RBI->getMaximumSize(RegBankID: RegBank->getID()) << ") to fit "
2292 << Ty.getSizeInBits() << "-bits\n";
2293 return;
2294 }
2295 }
2296
2297 if (SubIdx) {
2298 report(msg: "Generic virtual register does not allow subregister index", MO,
2299 MONum);
2300 return;
2301 }
2302
2303 // If this is a target specific instruction and this operand
2304 // has register class constraint, the virtual register must
2305 // comply to it.
2306 if (!isPreISelGenericOpcode(Opcode: MCID.getOpcode()) &&
2307 MONum < MCID.getNumOperands() &&
2308 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2309 report(msg: "Virtual register does not match instruction constraint", MO,
2310 MONum);
2311 errs() << "Expect register class "
2312 << TRI->getRegClassName(
2313 Class: TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF))
2314 << " but got nothing\n";
2315 return;
2316 }
2317
2318 break;
2319 }
2320 if (SubIdx) {
2321 const TargetRegisterClass *SRC =
2322 TRI->getSubClassWithSubReg(RC, Idx: SubIdx);
2323 if (!SRC) {
2324 report(msg: "Invalid subregister index for virtual register", MO, MONum);
2325 errs() << "Register class " << TRI->getRegClassName(Class: RC)
2326 << " does not support subreg index " << SubIdx << "\n";
2327 return;
2328 }
2329 if (RC != SRC) {
2330 report(msg: "Invalid register class for subregister index", MO, MONum);
2331 errs() << "Register class " << TRI->getRegClassName(Class: RC)
2332 << " does not fully support subreg index " << SubIdx << "\n";
2333 return;
2334 }
2335 }
2336 if (MONum < MCID.getNumOperands()) {
2337 if (const TargetRegisterClass *DRC =
2338 TII->getRegClass(MCID, OpNum: MONum, TRI, MF: *MF)) {
2339 if (SubIdx) {
2340 const TargetRegisterClass *SuperRC =
2341 TRI->getLargestLegalSuperClass(RC, *MF);
2342 if (!SuperRC) {
2343 report(msg: "No largest legal super class exists.", MO, MONum);
2344 return;
2345 }
2346 DRC = TRI->getMatchingSuperRegClass(A: SuperRC, B: DRC, Idx: SubIdx);
2347 if (!DRC) {
2348 report(msg: "No matching super-reg register class.", MO, MONum);
2349 return;
2350 }
2351 }
2352 if (!RC->hasSuperClassEq(RC: DRC)) {
2353 report(msg: "Illegal virtual register for instruction", MO, MONum);
2354 errs() << "Expected a " << TRI->getRegClassName(Class: DRC)
2355 << " register, but got a " << TRI->getRegClassName(Class: RC)
2356 << " register\n";
2357 }
2358 }
2359 }
2360 }
2361 break;
2362 }
2363
2364 case MachineOperand::MO_RegisterMask:
2365 regMasks.push_back(Elt: MO->getRegMask());
2366 break;
2367
2368 case MachineOperand::MO_MachineBasicBlock:
2369 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MBB: MI->getParent()))
2370 report(msg: "PHI operand is not in the CFG", MO, MONum);
2371 break;
2372
2373 case MachineOperand::MO_FrameIndex:
2374 if (LiveStks && LiveStks->hasInterval(Slot: MO->getIndex()) &&
2375 LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2376 int FI = MO->getIndex();
2377 LiveInterval &LI = LiveStks->getInterval(Slot: FI);
2378 SlotIndex Idx = LiveInts->getInstructionIndex(Instr: *MI);
2379
2380 bool stores = MI->mayStore();
2381 bool loads = MI->mayLoad();
2382 // For a memory-to-memory move, we need to check if the frame
2383 // index is used for storing or loading, by inspecting the
2384 // memory operands.
2385 if (stores && loads) {
2386 for (auto *MMO : MI->memoperands()) {
2387 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2388 if (PSV == nullptr) continue;
2389 const FixedStackPseudoSourceValue *Value =
2390 dyn_cast<FixedStackPseudoSourceValue>(Val: PSV);
2391 if (Value == nullptr) continue;
2392 if (Value->getFrameIndex() != FI) continue;
2393
2394 if (MMO->isStore())
2395 loads = false;
2396 else
2397 stores = false;
2398 break;
2399 }
2400 if (loads == stores)
2401 report(msg: "Missing fixed stack memoperand.", MI);
2402 }
2403 if (loads && !LI.liveAt(index: Idx.getRegSlot(EC: true))) {
2404 report(msg: "Instruction loads from dead spill slot", MO, MONum);
2405 errs() << "Live stack: " << LI << '\n';
2406 }
2407 if (stores && !LI.liveAt(index: Idx.getRegSlot())) {
2408 report(msg: "Instruction stores to dead spill slot", MO, MONum);
2409 errs() << "Live stack: " << LI << '\n';
2410 }
2411 }
2412 break;
2413
2414 case MachineOperand::MO_CFIIndex:
2415 if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
2416 report(msg: "CFI instruction has invalid index", MO, MONum);
2417 break;
2418
2419 default:
2420 break;
2421 }
2422}
2423
2424void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2425 unsigned MONum, SlotIndex UseIdx,
2426 const LiveRange &LR,
2427 Register VRegOrUnit,
2428 LaneBitmask LaneMask) {
2429 const MachineInstr *MI = MO->getParent();
2430 LiveQueryResult LRQ = LR.Query(Idx: UseIdx);
2431 bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
2432 // Check if we have a segment at the use, note however that we only need one
2433 // live subregister range, the others may be dead.
2434 if (!HasValue && LaneMask.none()) {
2435 report(msg: "No live segment at use", MO, MONum);
2436 report_context_liverange(LR);
2437 report_context_vreg_regunit(VRegOrUnit);
2438 report_context(Pos: UseIdx);
2439 }
2440 if (MO->isKill() && !LRQ.isKill()) {
2441 report(msg: "Live range continues after kill flag", MO, MONum);
2442 report_context_liverange(LR);
2443 report_context_vreg_regunit(VRegOrUnit);
2444 if (LaneMask.any())
2445 report_context_lanemask(LaneMask);
2446 report_context(Pos: UseIdx);
2447 }
2448}
2449
2450void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2451 unsigned MONum, SlotIndex DefIdx,
2452 const LiveRange &LR,
2453 Register VRegOrUnit,
2454 bool SubRangeCheck,
2455 LaneBitmask LaneMask) {
2456 if (const VNInfo *VNI = LR.getVNInfoAt(Idx: DefIdx)) {
2457 // The LR can correspond to the whole reg and its def slot is not obliged
2458    // The LR can correspond to the whole reg, whose def slot need not be the
2459    // same as the MO's def slot: e.g. when we check a "normal" subreg MO here
2460    // but another early-clobber subreg MO in the same instruction gives the
2461    // whole reg an early-clobber def slot differing from this MO's. For example:
2462    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
2463    // The check that there is an early-clobber def of the same superregister
2464    // somewhere is performed in visitMachineFunctionAfter().
2465 if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
2466 !SlotIndex::isSameInstr(A: VNI->def, B: DefIdx) ||
2467 (VNI->def != DefIdx &&
2468 (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
2469 report(msg: "Inconsistent valno->def", MO, MONum);
2470 report_context_liverange(LR);
2471 report_context_vreg_regunit(VRegOrUnit);
2472 if (LaneMask.any())
2473 report_context_lanemask(LaneMask);
2474 report_context(VNI: *VNI);
2475 report_context(Pos: DefIdx);
2476 }
2477 } else {
2478 report(msg: "No live segment at def", MO, MONum);
2479 report_context_liverange(LR);
2480 report_context_vreg_regunit(VRegOrUnit);
2481 if (LaneMask.any())
2482 report_context_lanemask(LaneMask);
2483 report_context(Pos: DefIdx);
2484 }
2485 // Check that, if the dead def flag is present, LiveInts agree.
2486 if (MO->isDead()) {
2487 LiveQueryResult LRQ = LR.Query(Idx: DefIdx);
2488 if (!LRQ.isDeadDef()) {
2489 assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
2490 // A dead subreg def only tells us that the specific subreg is dead. There
2491 // could be other non-dead defs of other subregs, or we could have other
2492 // parts of the register being live through the instruction. So unless we
2493 // are checking liveness for a subrange it is ok for the live range to
2494 // continue, given that we have a dead def of a subregister.
2495 if (SubRangeCheck || MO->getSubReg() == 0) {
2496 report(msg: "Live range continues after dead def flag", MO, MONum);
2497 report_context_liverange(LR);
2498 report_context_vreg_regunit(VRegOrUnit);
2499 if (LaneMask.any())
2500 report_context_lanemask(LaneMask);
2501 }
2502 }
2503 }
2504}
2505
2506void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2507 const MachineInstr *MI = MO->getParent();
2508 const Register Reg = MO->getReg();
2509 const unsigned SubRegIdx = MO->getSubReg();
2510
2511 const LiveInterval *LI = nullptr;
2512 if (LiveInts && Reg.isVirtual()) {
2513 if (LiveInts->hasInterval(Reg)) {
2514 LI = &LiveInts->getInterval(Reg);
2515 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2516 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(VReg: Reg))
2517 report(msg: "Live interval for subreg operand has no subranges", MO, MONum);
2518 } else {
2519 report(msg: "Virtual register has no live interval", MO, MONum);
2520 }
2521 }
2522
2523 // Both use and def operands can read a register.
2524 if (MO->readsReg()) {
2525 if (MO->isKill())
2526 addRegWithSubRegs(RV&: regsKilled, Reg);
2527
2528 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2529 // which case we have already checked that LiveVars knows any kills on the
2530 // bundle header instead).
2531 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2532 !MI->isBundledWithPred()) {
2533 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2534 if (!is_contained(Range&: VI.Kills, Element: MI))
2535 report(msg: "Kill missing from LiveVariables", MO, MONum);
2536 }
2537
2538 // Check LiveInts liveness and kill.
2539 if (LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2540 SlotIndex UseIdx;
2541 if (MI->isPHI()) {
2542 // PHI use occurs on the edge, so check for live out here instead.
2543 UseIdx = LiveInts->getMBBEndIdx(
2544 mbb: MI->getOperand(i: MONum + 1).getMBB()).getPrevSlot();
2545 } else {
2546 UseIdx = LiveInts->getInstructionIndex(Instr: *MI);
2547 }
2548 // Check the cached regunit intervals.
2549 if (Reg.isPhysical() && !isReserved(Reg)) {
2550 for (MCRegUnit Unit : TRI->regunits(Reg: Reg.asMCReg())) {
2551 if (MRI->isReservedRegUnit(Unit))
2552 continue;
2553 if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
2554 checkLivenessAtUse(MO, MONum, UseIdx, LR: *LR, VRegOrUnit: Unit);
2555 }
2556 }
2557
2558 if (Reg.isVirtual()) {
2559 // This is a virtual register interval.
2560 checkLivenessAtUse(MO, MONum, UseIdx, LR: *LI, VRegOrUnit: Reg);
2561
2562 if (LI->hasSubRanges() && !MO->isDef()) {
2563 LaneBitmask MOMask = SubRegIdx != 0
2564 ? TRI->getSubRegIndexLaneMask(SubIdx: SubRegIdx)
2565 : MRI->getMaxLaneMaskForVReg(Reg);
2566 LaneBitmask LiveInMask;
2567 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2568 if ((MOMask & SR.LaneMask).none())
2569 continue;
2570 checkLivenessAtUse(MO, MONum, UseIdx, LR: SR, VRegOrUnit: Reg, LaneMask: SR.LaneMask);
2571 LiveQueryResult LRQ = SR.Query(Idx: UseIdx);
2572 if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
2573 LiveInMask |= SR.LaneMask;
2574 }
2575          // At least part of the register has to be live at the use.
2576 if ((LiveInMask & MOMask).none()) {
2577 report(msg: "No live subrange at use", MO, MONum);
2578 report_context(LI: *LI);
2579 report_context(Pos: UseIdx);
2580 }
2581          // For PHIs, all lanes should be live.
2582 if (MI->isPHI() && LiveInMask != MOMask) {
2583 report(msg: "Not all lanes of PHI source live at use", MO, MONum);
2584 report_context(LI: *LI);
2585 report_context(Pos: UseIdx);
2586 }
2587 }
2588 }
2589 }
2590
2591 // Use of a dead register.
2592 if (!regsLive.count(V: Reg)) {
2593 if (Reg.isPhysical()) {
2594 // Reserved registers may be used even when 'dead'.
2595 bool Bad = !isReserved(Reg);
2596 // We are fine if just any subregister has a defined value.
2597 if (Bad) {
2599 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2600 if (regsLive.count(V: SubReg)) {
2601 Bad = false;
2602 break;
2603 }
2604 }
2605 }
2606        // If there is an additional implicit-use of a super register we stop
2607        // here. By definition we are fine if the super register is not
2608        // (completely) dead; if the complete super register is dead we will
2609        // get a report for its operand.
2610 if (Bad) {
2611 for (const MachineOperand &MOP : MI->uses()) {
2612 if (!MOP.isReg() || !MOP.isImplicit())
2613 continue;
2614
2615 if (!MOP.getReg().isPhysical())
2616 continue;
2617
2618 if (llvm::is_contained(Range: TRI->subregs(Reg: MOP.getReg()), Element: Reg))
2619 Bad = false;
2620 }
2621 }
2622 if (Bad)
2623 report(msg: "Using an undefined physical register", MO, MONum);
2624 } else if (MRI->def_empty(RegNo: Reg)) {
2625 report(msg: "Reading virtual register without a def", MO, MONum);
2626 } else {
2627 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2628 // We don't know which virtual registers are live in, so only complain
2629 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2630 // must be live in. PHI instructions are handled separately.
2631 if (MInfo.regsKilled.count(V: Reg))
2632 report(msg: "Using a killed virtual register", MO, MONum);
2633 else if (!MI->isPHI())
2634 MInfo.vregsLiveIn.insert(KV: std::make_pair(x: Reg, y&: MI));
2635 }
2636 }
2637 }
2638
2639 if (MO->isDef()) {
2640 // Register defined.
2641 // TODO: verify that earlyclobber ops are not used.
2642 if (MO->isDead())
2643 addRegWithSubRegs(RV&: regsDead, Reg);
2644 else
2645 addRegWithSubRegs(RV&: regsDefined, Reg);
2646
2647 // Verify SSA form.
2648 if (MRI->isSSA() && Reg.isVirtual() &&
2649 std::next(x: MRI->def_begin(RegNo: Reg)) != MRI->def_end())
2650 report(msg: "Multiple virtual register defs in SSA form", MO, MONum);
2651
2652 // Check LiveInts for a live segment, but only for virtual registers.
2653 if (LiveInts && !LiveInts->isNotInMIMap(Instr: *MI)) {
2654 SlotIndex DefIdx = LiveInts->getInstructionIndex(Instr: *MI);
2655 DefIdx = DefIdx.getRegSlot(EC: MO->isEarlyClobber());
2656
2657 if (Reg.isVirtual()) {
2658 checkLivenessAtDef(MO, MONum, DefIdx, LR: *LI, VRegOrUnit: Reg);
2659
2660 if (LI->hasSubRanges()) {
2661 LaneBitmask MOMask = SubRegIdx != 0
2662 ? TRI->getSubRegIndexLaneMask(SubIdx: SubRegIdx)
2663 : MRI->getMaxLaneMaskForVReg(Reg);
2664 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2665 if ((SR.LaneMask & MOMask).none())
2666 continue;
2667 checkLivenessAtDef(MO, MONum, DefIdx, LR: SR, VRegOrUnit: Reg, SubRangeCheck: true, LaneMask: SR.LaneMask);
2668 }
2669 }
2670 }
2671 }
2672 }
2673}
2674
2675// This function gets called after visiting all instructions in a bundle. The
2676// argument points to the bundle header.
2677// Normal stand-alone instructions are also considered 'bundles', and this
2678// function is called for all of them.
2679void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2680 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2681 set_union(S1&: MInfo.regsKilled, S2: regsKilled);
2682 set_subtract(S1&: regsLive, S2: regsKilled); regsKilled.clear();
2683 // Kill any masked registers.
2684 while (!regMasks.empty()) {
2685 const uint32_t *Mask = regMasks.pop_back_val();
2686 for (Register Reg : regsLive)
2687 if (Reg.isPhysical() &&
2688 MachineOperand::clobbersPhysReg(RegMask: Mask, PhysReg: Reg.asMCReg()))
2689 regsDead.push_back(Elt: Reg);
2690 }
2691 set_subtract(S1&: regsLive, S2: regsDead); regsDead.clear();
2692 set_union(S1&: regsLive, S2: regsDefined); regsDefined.clear();
2693}
2694
2695void
2696MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2697 MBBInfoMap[MBB].regsLiveOut = regsLive;
2698 regsLive.clear();
2699
2700 if (Indexes) {
2701 SlotIndex stop = Indexes->getMBBEndIdx(mbb: MBB);
2702 if (!(stop > lastIndex)) {
2703 report(msg: "Block ends before last instruction index", MBB);
2704 errs() << "Block ends at " << stop
2705 << " last instruction was at " << lastIndex << '\n';
2706 }
2707 lastIndex = stop;
2708 }
2709}
2710
2711namespace {
2712 // This implements a set of registers that serves as a filter: it can filter
2713 // other sets by passing through elements not in the filter and blocking
2714 // those that are. Any filter implicitly includes all physical registers upon
2715// creation, thus filtering them all out. The filter itself as a set only grows,
2716// and needs to be as efficient as possible.
2717struct VRegFilter {
2718 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2719 // no duplicates. Both virtual and physical registers are fine.
2720 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2721 SmallVector<Register, 0> VRegsBuffer;
2722 filterAndAdd(FromRegSet, VRegsBuffer);
2723 }
2724 // Filter \p FromRegSet through the filter and append passed elements into \p
2725 // ToVRegs. All elements appended are then added to the filter itself.
2726 // \returns true if anything changed.
2727 template <typename RegSetT>
2728 bool filterAndAdd(const RegSetT &FromRegSet,
2729 SmallVectorImpl<Register> &ToVRegs) {
2730 unsigned SparseUniverse = Sparse.size();
2731 unsigned NewSparseUniverse = SparseUniverse;
2732 unsigned NewDenseSize = Dense.size();
2733 size_t Begin = ToVRegs.size();
2734 for (Register Reg : FromRegSet) {
2735 if (!Reg.isVirtual())
2736 continue;
2737 unsigned Index = Register::virtReg2Index(Reg);
2738 if (Index < SparseUniverseMax) {
2739 if (Index < SparseUniverse && Sparse.test(Idx: Index))
2740 continue;
2741 NewSparseUniverse = std::max(a: NewSparseUniverse, b: Index + 1);
2742 } else {
2743 if (Dense.count(V: Reg))
2744 continue;
2745 ++NewDenseSize;
2746 }
2747 ToVRegs.push_back(Elt: Reg);
2748 }
2749 size_t End = ToVRegs.size();
2750 if (Begin == End)
2751 return false;
2752 // Reserving space in sets once performs better than doing so continuously
2753 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2754 // tuned all the way down) and double iteration (the second one is over a
2755 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2756 Sparse.resize(N: NewSparseUniverse);
2757 Dense.reserve(Size: NewDenseSize);
2758 for (unsigned I = Begin; I < End; ++I) {
2759 Register Reg = ToVRegs[I];
2760 unsigned Index = Register::virtReg2Index(Reg);
2761 if (Index < SparseUniverseMax)
2762 Sparse.set(Index);
2763 else
2764 Dense.insert(V: Reg);
2765 }
2766 return true;
2767 }
2768
2769private:
2770 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2771  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2772  // are tracked by Dense. The only purpose of the threshold and the Dense set
2773  // is to have a reasonably growing memory usage in pathological cases (large
2774  // number of very sparse VRegFilter instances live at the same time). In
2775  // practice even in the worst-by-execution-time cases having all elements
2776  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2777  // space efficient than if tracked by Dense. The threshold is set to keep the
2778 // worst-case memory usage within 2x of figures determined empirically for
2779 // "all Dense" scenario in such worst-by-execution-time cases.
2780 BitVector Sparse;
2781 DenseSet<unsigned> Dense;
2782};
2783
2784// Implements both a transfer function and a (binary, in-place) join operator
2785// for a dataflow over register sets with set union join and filtering transfer
2786// (out_b = in_b \ filter_b). filter_b is expected to be set-up ahead of time.
2787// Maintains out_b as its state, allowing for O(n) iteration over it at any
2788// time, where n is the size of the set (as opposed to O(U) where U is the
2789// universe). filter_b implicitly contains all physical registers at all times.
2790class FilteringVRegSet {
2791 VRegFilter Filter;
2792 SmallVector<Register, 0> VRegs;
2793
2794public:
2795 // Set-up the filter_b. \pre Input register set \p RS must have no duplicates.
2796 // Both virtual and physical registers are fine.
2797 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2798 Filter.add(RS);
2799 }
2800 // Passes \p RS through the filter_b (transfer function) and adds what's left
2801 // to itself (out_b).
2802 template <typename RegSetT> bool add(const RegSetT &RS) {
2803 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2804 // a set union) just add everything being added here to the Filter as well.
2805 return Filter.filterAndAdd(RS, VRegs);
2806 }
2807 using const_iterator = decltype(VRegs)::const_iterator;
2808 const_iterator begin() const { return VRegs.begin(); }
2809 const_iterator end() const { return VRegs.end(); }
2810 size_t size() const { return VRegs.size(); }
2811};
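// A minimal usage sketch, mirroring what calcRegsPassed does below
// (identifiers borrowed from that function):
//   FilteringVRegSet VRegs;
//   VRegs.addToFilter(Info.regsKilled);   // set up filter_b
//   VRegs.add(PredInfo.regsLiveOut);      // transfer + join into out_b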
2812} // namespace
2813
2814// Calculate the largest possible vregsPassed sets. These are the registers that
2815// can pass through an MBB live, but may not be live every time. It is assumed
2816// that all vregsPassed sets are empty before the call.
2817void MachineVerifier::calcRegsPassed() {
2818 if (MF->empty())
2819 // ReversePostOrderTraversal doesn't handle empty functions.
2820 return;
2821
2822 for (const MachineBasicBlock *MB :
2823 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2824 FilteringVRegSet VRegs;
2825 BBInfo &Info = MBBInfoMap[MB];
2826 assert(Info.reachable);
2827
2828 VRegs.addToFilter(RS: Info.regsKilled);
2829 VRegs.addToFilter(RS: Info.regsLiveOut);
2830 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2831 const BBInfo &PredInfo = MBBInfoMap[Pred];
2832 if (!PredInfo.reachable)
2833 continue;
2834
2835 VRegs.add(RS: PredInfo.regsLiveOut);
2836 VRegs.add(RS: PredInfo.vregsPassed);
2837 }
2838 Info.vregsPassed.reserve(Size: VRegs.size());
2839 Info.vregsPassed.insert(I: VRegs.begin(), E: VRegs.end());
2840 }
2841}
2842
2843// Calculate the set of virtual registers that must be passed through each basic
2844// block in order to satisfy the requirements of successor blocks. This is very
2845// similar to calcRegsPassed, only backwards.
2846void MachineVerifier::calcRegsRequired() {
2847 // First push live-in regs to predecessors' vregsRequired.
2848 SmallPtrSet<const MachineBasicBlock*, 8> todo;
2849 for (const auto &MBB : *MF) {
2850 BBInfo &MInfo = MBBInfoMap[&MBB];
2851 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2852 BBInfo &PInfo = MBBInfoMap[Pred];
2853 if (PInfo.addRequired(RM: MInfo.vregsLiveIn))
2854 todo.insert(Ptr: Pred);
2855 }
2856
2857 // Handle the PHI node.
2858 for (const MachineInstr &MI : MBB.phis()) {
2859 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2860 // Skip those Operands which are undef regs or not regs.
2861 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2862 continue;
2863
2864 // Get register and predecessor for one PHI edge.
2865 Register Reg = MI.getOperand(i).getReg();
2866 const MachineBasicBlock *Pred = MI.getOperand(i: i + 1).getMBB();
2867
2868 BBInfo &PInfo = MBBInfoMap[Pred];
2869 if (PInfo.addRequired(Reg))
2870 todo.insert(Ptr: Pred);
2871 }
2872 }
2873 }
2874
2875 // Iteratively push vregsRequired to predecessors. This will converge to the
2876 // same final state regardless of DenseSet iteration order.
2877 while (!todo.empty()) {
2878 const MachineBasicBlock *MBB = *todo.begin();
2879 todo.erase(Ptr: MBB);
2880 BBInfo &MInfo = MBBInfoMap[MBB];
2881 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2882 if (Pred == MBB)
2883 continue;
2884 BBInfo &SInfo = MBBInfoMap[Pred];
2885 if (SInfo.addRequired(RS: MInfo.vregsRequired))
2886 todo.insert(Ptr: Pred);
2887 }
2888 }
2889}
2890
2891// Check PHI instructions at the beginning of MBB. It is assumed that
2892// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2893void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2894 BBInfo &MInfo = MBBInfoMap[&MBB];
2895
2896 SmallPtrSet<const MachineBasicBlock*, 8> seen;
2897 for (const MachineInstr &Phi : MBB) {
2898 if (!Phi.isPHI())
2899 break;
2900 seen.clear();
2901
2902 const MachineOperand &MODef = Phi.getOperand(i: 0);
2903 if (!MODef.isReg() || !MODef.isDef()) {
2904 report(msg: "Expected first PHI operand to be a register def", MO: &MODef, MONum: 0);
2905 continue;
2906 }
2907 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2908 MODef.isEarlyClobber() || MODef.isDebug())
2909 report(msg: "Unexpected flag on PHI operand", MO: &MODef, MONum: 0);
2910 Register DefReg = MODef.getReg();
2911 if (!DefReg.isVirtual())
2912 report(msg: "Expected first PHI operand to be a virtual register", MO: &MODef, MONum: 0);
2913
    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}

void MachineVerifier::visitMachineFunctionAfter() {
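  // Calculate the largest possible vregsPassed sets; checkPHIOps below relies
  // on BBInfo::isLiveOut being valid afterwards.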
  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Calculate the set of virtual registers that must be live-in to each
  // block; the liveness checks below depend on it.
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

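  // A virtual register required live-in to the entry block has a use that is
  // not dominated by any def.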
  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
        MCPhysReg LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
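        // A live-out alias could satisfy this live-in without LiveInReg itself
        // appearing in regsLiveOut, so aliased registers are not checked.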
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from predecessor.",
                   &MBB);
            errs() << TRI->getName(LiveInReg)
                   << " not found to be live out from "
                   << printMBBReference(*Pred) << "\n";
          }
        }
      }

  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks.
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}

void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}

void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
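    // Find an operand of MI that defines Reg. For regunit ranges, Reg names a
    // register unit, and any physical def covering that unit counts.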
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!MOI->getReg().isPhysical() ||
            !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}

void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for non-live-out segments.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed to contain dead PHI-defs.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment is ending inside EndMBB.
    const MachineInstr *MI =
        LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // The block slot must refer to a basic block boundary.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // Segment ends on the dead slot.
      // That means there must be a dead def.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end at
    // an early-clobber slot if the last use is tied to an early-clobber def.
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane
            // mask for subregister defs. Read-undef defs will be handled by
            // readsReg below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for subregister
        // liveranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }

  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

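  // Walk every block spanned by the segment and check that the value is live
  // out of each CFG predecessor.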
  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // For a landing-pad successor, check liveness at the last call in the
      // predecessor rather than at the block end.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However, for a PHI
      // instruction with subregister intervals, only one of the subregisters
      // (not necessarily the current one) needs to be defined.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}

void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}

void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

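  // Each subrange tracks the liveness of a disjoint subset of lanes; subranges
  // must not overlap, and each must be covered by the main range.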
  if (LI.hasSubRanges()) {
    LaneBitmask Mask;
    LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
    for (const LiveInterval::SubRange &SR : LI.subranges()) {
      if ((Mask & SR.LaneMask).any()) {
        report("Lane masks of sub ranges overlap in live interval", MF);
        report_context(LI);
      }
      if ((SR.LaneMask & ~MaxMask).any()) {
        report("Subrange lanemask is invalid", MF);
        report_context(LI);
      }
      if (SR.empty()) {
        report("Subrange must not be empty", MF);
        report_context(SR, LI.reg(), SR.LaneMask);
      }
      Mask |= SR.LaneMask;
      verifyLiveRange(SR, LI.reg(), SR.LaneMask);
      if (!LI.covers(SR)) {
        report("A Subrange is not covered by the main range", MF);
        report_context(LI);
      }
    }
  }

  // Check the LI only has one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
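  // Disconnected components should have been split into separate virtual
  // registers before this point.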
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}

namespace {

  // FrameSetup and FrameDestroy can both have zero adjustment, so a single
  // integer cannot tell whether a pending operation is a FrameSetup or a
  // FrameDestroy when the value is zero.
  // We therefore use a bool plus an integer to capture the stack state.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
      EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
      ExitIsSetup(ExitSetup) {}

    // Can be negative, which means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace

/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, that stack adjustments are identical on all
/// CFG edges to a merge point, and that the frame is destroyed at the end of
/// a return block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;

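  // While a frame is set up, the tracked stack value is negative (the stack
  // pointer has been adjusted down). The opcodes checked here are the target's
  // call-frame pseudos, commonly named ADJCALLSTACKDOWN/ADJCALLSTACKUP.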
  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock*> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
       DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;

    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
      report("Call frame size on entry does not match value computed from "
             "predecessor",
             MBB);
      errs() << "Call frame size on entry " << MBB->getCallFrameSize()
             << " does not match value computed from predecessor "
             << -BBState.EntryValue << '\n';
    }

3524
3525 // Update stack state by checking contents of MBB.
3526 for (const auto &I : *MBB) {
3527 if (I.getOpcode() == FrameSetupOpcode) {
3528 if (BBState.ExitIsSetup)
3529 report(msg: "FrameSetup is after another FrameSetup", MI: &I);
3530 BBState.ExitValue -= TII->getFrameTotalSize(I);
3531 BBState.ExitIsSetup = true;
3532 }
3533
3534 if (I.getOpcode() == FrameDestroyOpcode) {
3535 int Size = TII->getFrameTotalSize(I);
3536 if (!BBState.ExitIsSetup)
3537 report(msg: "FrameDestroy is not after a FrameSetup", MI: &I);
3538 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3539 BBState.ExitValue;
3540 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3541 report(msg: "FrameDestroy <n> is after FrameSetup <m>", MI: &I);
3542 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3543 << AbsSPAdj << ">.\n";
3544 }
3545 BBState.ExitValue += Size;
3546 BBState.ExitIsSetup = false;
3547 }
3548 }
    SPState[MBB->getNumber()] = BBState;

    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }

    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }

    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);
    }
  }
}