1//===- SelectionDAGISel.cpp - Implement the SelectionDAGISel class --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This implements the SelectionDAGISel class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/CodeGen/SelectionDAGISel.h"
14#include "ScheduleDAGSDNodes.h"
15#include "SelectionDAGBuilder.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/PostOrderIterator.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/Statistic.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/Analysis/AliasAnalysis.h"
25#include "llvm/Analysis/AssumptionCache.h"
26#include "llvm/Analysis/BranchProbabilityInfo.h"
27#include "llvm/Analysis/CFG.h"
28#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
29#include "llvm/Analysis/OptimizationRemarkEmitter.h"
30#include "llvm/Analysis/ProfileSummaryInfo.h"
31#include "llvm/Analysis/TargetLibraryInfo.h"
32#include "llvm/Analysis/TargetTransformInfo.h"
33#include "llvm/Analysis/UniformityAnalysis.h"
34#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
35#include "llvm/CodeGen/CodeGenCommonISel.h"
36#include "llvm/CodeGen/FastISel.h"
37#include "llvm/CodeGen/FunctionLoweringInfo.h"
38#include "llvm/CodeGen/GCMetadata.h"
39#include "llvm/CodeGen/ISDOpcodes.h"
40#include "llvm/CodeGen/MachineBasicBlock.h"
41#include "llvm/CodeGen/MachineFrameInfo.h"
42#include "llvm/CodeGen/MachineFunction.h"
43#include "llvm/CodeGen/MachineFunctionPass.h"
44#include "llvm/CodeGen/MachineInstr.h"
45#include "llvm/CodeGen/MachineInstrBuilder.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachinePassRegistry.h"
50#include "llvm/CodeGen/MachineRegisterInfo.h"
51#include "llvm/CodeGen/SchedulerRegistry.h"
52#include "llvm/CodeGen/SelectionDAG.h"
53#include "llvm/CodeGen/SelectionDAGNodes.h"
54#include "llvm/CodeGen/StackMaps.h"
55#include "llvm/CodeGen/StackProtector.h"
56#include "llvm/CodeGen/SwiftErrorValueTracking.h"
57#include "llvm/CodeGen/TargetInstrInfo.h"
58#include "llvm/CodeGen/TargetLowering.h"
59#include "llvm/CodeGen/TargetRegisterInfo.h"
60#include "llvm/CodeGen/TargetSubtargetInfo.h"
61#include "llvm/CodeGen/ValueTypes.h"
62#include "llvm/CodeGen/WinEHFuncInfo.h"
63#include "llvm/CodeGenTypes/MachineValueType.h"
64#include "llvm/IR/BasicBlock.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfo.h"
68#include "llvm/IR/DebugInfoMetadata.h"
69#include "llvm/IR/DebugLoc.h"
70#include "llvm/IR/DiagnosticInfo.h"
71#include "llvm/IR/EHPersonalities.h"
72#include "llvm/IR/Function.h"
73#include "llvm/IR/InlineAsm.h"
74#include "llvm/IR/InstIterator.h"
75#include "llvm/IR/Instruction.h"
76#include "llvm/IR/Instructions.h"
77#include "llvm/IR/IntrinsicInst.h"
78#include "llvm/IR/Intrinsics.h"
79#include "llvm/IR/IntrinsicsWebAssembly.h"
80#include "llvm/IR/Metadata.h"
81#include "llvm/IR/PrintPasses.h"
82#include "llvm/IR/Statepoint.h"
83#include "llvm/IR/Type.h"
84#include "llvm/IR/User.h"
85#include "llvm/IR/Value.h"
86#include "llvm/InitializePasses.h"
87#include "llvm/MC/MCInstrDesc.h"
88#include "llvm/Pass.h"
89#include "llvm/Support/BranchProbability.h"
90#include "llvm/Support/Casting.h"
91#include "llvm/Support/CodeGen.h"
92#include "llvm/Support/CommandLine.h"
93#include "llvm/Support/Compiler.h"
94#include "llvm/Support/Debug.h"
95#include "llvm/Support/ErrorHandling.h"
96#include "llvm/Support/KnownBits.h"
97#include "llvm/Support/Timer.h"
98#include "llvm/Support/raw_ostream.h"
99#include "llvm/Target/TargetIntrinsicInfo.h"
100#include "llvm/Target/TargetMachine.h"
101#include "llvm/Target/TargetOptions.h"
102#include "llvm/Transforms/Utils/BasicBlockUtils.h"
103#include <algorithm>
104#include <cassert>
105#include <cstdint>
106#include <iterator>
107#include <limits>
108#include <memory>
109#include <optional>
110#include <string>
111#include <utility>
112#include <vector>
113
114using namespace llvm;
115
116#define DEBUG_TYPE "isel"
117#define ISEL_DUMP_DEBUG_TYPE DEBUG_TYPE "-dump"
118
119STATISTIC(NumFastIselFailures, "Number of instructions fast isel failed on");
120STATISTIC(NumFastIselSuccess, "Number of instructions fast isel selected");
121STATISTIC(NumFastIselBlocks, "Number of blocks selected entirely by fast isel");
122STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
123STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
124STATISTIC(NumEntryBlocks, "Number of entry blocks encountered");
125STATISTIC(NumFastIselFailLowerArguments,
126 "Number of entry blocks where fast isel failed to lower arguments");
127
128static cl::opt<int> EnableFastISelAbort(
129 "fast-isel-abort", cl::Hidden,
130 cl::desc("Enable abort calls when \"fast\" instruction selection "
131 "fails to lower an instruction: 0 disable the abort, 1 will "
132 "abort but for args, calls and terminators, 2 will also "
133 "abort for argument lowering, and 3 will never fallback "
134 "to SelectionDAG."));
135
136static cl::opt<bool> EnableFastISelFallbackReport(
137 "fast-isel-report-on-fallback", cl::Hidden,
138 cl::desc("Emit a diagnostic when \"fast\" instruction selection "
139 "falls back to SelectionDAG."));
140
141static cl::opt<bool>
142UseMBPI("use-mbpi",
143 cl::desc("use Machine Branch Probability Info"),
144 cl::init(Val: true), cl::Hidden);
145
146#ifndef NDEBUG
147static cl::opt<std::string>
148FilterDAGBasicBlockName("filter-view-dags", cl::Hidden,
149 cl::desc("Only display the basic block whose name "
150 "matches this for all view-*-dags options"));
151static cl::opt<bool>
152ViewDAGCombine1("view-dag-combine1-dags", cl::Hidden,
153 cl::desc("Pop up a window to show dags before the first "
154 "dag combine pass"));
155static cl::opt<bool>
156ViewLegalizeTypesDAGs("view-legalize-types-dags", cl::Hidden,
157 cl::desc("Pop up a window to show dags before legalize types"));
158static cl::opt<bool>
159 ViewDAGCombineLT("view-dag-combine-lt-dags", cl::Hidden,
160 cl::desc("Pop up a window to show dags before the post "
161 "legalize types dag combine pass"));
162static cl::opt<bool>
163 ViewLegalizeDAGs("view-legalize-dags", cl::Hidden,
164 cl::desc("Pop up a window to show dags before legalize"));
165static cl::opt<bool>
166ViewDAGCombine2("view-dag-combine2-dags", cl::Hidden,
167 cl::desc("Pop up a window to show dags before the second "
168 "dag combine pass"));
169static cl::opt<bool>
170ViewISelDAGs("view-isel-dags", cl::Hidden,
171 cl::desc("Pop up a window to show isel dags as they are selected"));
172static cl::opt<bool>
173ViewSchedDAGs("view-sched-dags", cl::Hidden,
174 cl::desc("Pop up a window to show sched dags as they are processed"));
175static cl::opt<bool>
176ViewSUnitDAGs("view-sunit-dags", cl::Hidden,
177 cl::desc("Pop up a window to show SUnit dags after they are processed"));
178#else
179static const bool ViewDAGCombine1 = false, ViewLegalizeTypesDAGs = false,
180 ViewDAGCombineLT = false, ViewLegalizeDAGs = false,
181 ViewDAGCombine2 = false, ViewISelDAGs = false,
182 ViewSchedDAGs = false, ViewSUnitDAGs = false;
183#endif
184
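// ISEL_DUMP(X) executes X only when LLVM debug output is enabled and either
// the "isel" debug type is active, or the "isel-dump" debug type is active
// and the current function matches the print-function filter (see
// MatchFilterFuncName).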
185#ifndef NDEBUG
186#define ISEL_DUMP(X) \
187 do { \
188 if (llvm::DebugFlag && \
189 (isCurrentDebugType(DEBUG_TYPE) || \
190 (isCurrentDebugType(ISEL_DUMP_DEBUG_TYPE) && MatchFilterFuncName))) { \
191 X; \
192 } \
193 } while (false)
194#else
195#define ISEL_DUMP(X) do { } while (false)
196#endif
197
198//===---------------------------------------------------------------------===//
199///
200/// RegisterScheduler class - Track the registration of instruction schedulers.
201///
202//===---------------------------------------------------------------------===//
203MachinePassRegistry<RegisterScheduler::FunctionPassCtor>
204 RegisterScheduler::Registry;
205
206//===---------------------------------------------------------------------===//
207///
208/// ISHeuristic command line option for instruction schedulers.
209///
210//===---------------------------------------------------------------------===//
211static cl::opt<RegisterScheduler::FunctionPassCtor, false,
212 RegisterPassParser<RegisterScheduler>>
213ISHeuristic("pre-RA-sched",
214 cl::init(Val: &createDefaultScheduler), cl::Hidden,
215 cl::desc("Instruction schedulers available (before register"
216 " allocation):"));
217
218static RegisterScheduler
219defaultListDAGScheduler("default", "Best scheduler for the target",
220 createDefaultScheduler);
221
222static bool dontUseFastISelFor(const Function &Fn) {
223 // Don't enable FastISel for functions with swiftasync Arguments.
224 // Debug info on those is reliant on good Argument lowering, and FastISel is
 // not capable of lowering the entire function. Mixing the two selectors tends
226 // to result in poor lowering of Arguments.
227 return any_of(Range: Fn.args(), P: [](const Argument &Arg) {
228 return Arg.hasAttribute(Attribute::AttrKind::SwiftAsync);
229 });
230}
231
232namespace llvm {
233
234 //===--------------------------------------------------------------------===//
235 /// This class is used by SelectionDAGISel to temporarily override
236 /// the optimization level on a per-function basis.
237 class OptLevelChanger {
238 SelectionDAGISel &IS;
239 CodeGenOptLevel SavedOptLevel;
240 bool SavedFastISel;
241
242 public:
243 OptLevelChanger(SelectionDAGISel &ISel, CodeGenOptLevel NewOptLevel)
244 : IS(ISel) {
245 SavedOptLevel = IS.OptLevel;
246 SavedFastISel = IS.TM.Options.EnableFastISel;
247 if (NewOptLevel != SavedOptLevel) {
248 IS.OptLevel = NewOptLevel;
249 IS.TM.setOptLevel(NewOptLevel);
250 LLVM_DEBUG(dbgs() << "\nChanging optimization level for Function "
251 << IS.MF->getFunction().getName() << "\n");
252 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(SavedOptLevel)
253 << " ; After: -O" << static_cast<int>(NewOptLevel)
254 << "\n");
255 if (NewOptLevel == CodeGenOptLevel::None)
256 IS.TM.setFastISel(IS.TM.getO0WantsFastISel());
257 }
258 if (dontUseFastISelFor(Fn: IS.MF->getFunction()))
259 IS.TM.setFastISel(false);
260 LLVM_DEBUG(
261 dbgs() << "\tFastISel is "
262 << (IS.TM.Options.EnableFastISel ? "enabled" : "disabled")
263 << "\n");
264 }
265
266 ~OptLevelChanger() {
267 if (IS.OptLevel == SavedOptLevel)
268 return;
269 LLVM_DEBUG(dbgs() << "\nRestoring optimization level for Function "
270 << IS.MF->getFunction().getName() << "\n");
271 LLVM_DEBUG(dbgs() << "\tBefore: -O" << static_cast<int>(IS.OptLevel)
272 << " ; After: -O" << static_cast<int>(SavedOptLevel) << "\n");
273 IS.OptLevel = SavedOptLevel;
274 IS.TM.setOptLevel(SavedOptLevel);
275 IS.TM.setFastISel(SavedFastISel);
276 }
277 };
278
279 //===--------------------------------------------------------------------===//
280 /// createDefaultScheduler - This creates an instruction scheduler appropriate
281 /// for the target.
282 ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
283 CodeGenOptLevel OptLevel) {
284 const TargetLowering *TLI = IS->TLI;
285 const TargetSubtargetInfo &ST = IS->MF->getSubtarget();
286
287 // Try first to see if the Target has its own way of selecting a scheduler
288 if (auto *SchedulerCtor = ST.getDAGScheduler(OptLevel)) {
289 return SchedulerCtor(IS, OptLevel);
290 }
291
292 if (OptLevel == CodeGenOptLevel::None ||
293 (ST.enableMachineScheduler() && ST.enableMachineSchedDefaultSched()) ||
294 TLI->getSchedulingPreference() == Sched::Source)
295 return createSourceListDAGScheduler(IS, OptLevel);
296 if (TLI->getSchedulingPreference() == Sched::RegPressure)
297 return createBURRListDAGScheduler(IS, OptLevel);
298 if (TLI->getSchedulingPreference() == Sched::Hybrid)
299 return createHybridListDAGScheduler(IS, OptLevel);
300 if (TLI->getSchedulingPreference() == Sched::VLIW)
301 return createVLIWDAGScheduler(IS, OptLevel);
302 if (TLI->getSchedulingPreference() == Sched::Fast)
303 return createFastDAGScheduler(IS, OptLevel);
304 if (TLI->getSchedulingPreference() == Sched::Linearize)
305 return createDAGLinearizer(IS, OptLevel);
306 assert(TLI->getSchedulingPreference() == Sched::ILP &&
307 "Unknown sched type!");
308 return createILPListDAGScheduler(IS, OptLevel);
309 }
310
311} // end namespace llvm
312
313// EmitInstrWithCustomInserter - This method should be implemented by targets
314// that mark instructions with the 'usesCustomInserter' flag. These
315// instructions are special in various ways, which require special support to
316// insert. The specified MachineInstr is created but not inserted into any
317// basic blocks, and this method is called to expand it into a sequence of
318// instructions, potentially also creating new basic blocks and control flow.
319// When new basic blocks are inserted and the edges from MBB to its successors
320// are modified, the method should insert pairs of <OldSucc, NewSucc> into the
321// DenseMap.
322MachineBasicBlock *
323TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
324 MachineBasicBlock *MBB) const {
325#ifndef NDEBUG
326 dbgs() << "If a target marks an instruction with "
327 "'usesCustomInserter', it must implement "
328 "TargetLowering::EmitInstrWithCustomInserter!\n";
329#endif
330 llvm_unreachable(nullptr);
331}
332
333void TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
334 SDNode *Node) const {
335 assert(!MI.hasPostISelHook() &&
336 "If a target marks an instruction with 'hasPostISelHook', "
337 "it must implement TargetLowering::AdjustInstrPostInstrSelection!");
338}
339
340//===----------------------------------------------------------------------===//
341// SelectionDAGISel code
342//===----------------------------------------------------------------------===//
343
344SelectionDAGISel::SelectionDAGISel(char &ID, TargetMachine &tm,
345 CodeGenOptLevel OL)
346 : MachineFunctionPass(ID), TM(tm), FuncInfo(new FunctionLoweringInfo()),
347 SwiftError(new SwiftErrorValueTracking()),
348 CurDAG(new SelectionDAG(tm, OL)),
349 SDB(std::make_unique<SelectionDAGBuilder>(args&: *CurDAG, args&: *FuncInfo, args&: *SwiftError,
350 args&: OL)),
351 OptLevel(OL) {
352 initializeGCModuleInfoPass(*PassRegistry::getPassRegistry());
353 initializeBranchProbabilityInfoWrapperPassPass(
354 *PassRegistry::getPassRegistry());
355 initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
356 initializeTargetLibraryInfoWrapperPassPass(*PassRegistry::getPassRegistry());
357}
358
359SelectionDAGISel::~SelectionDAGISel() {
360 delete CurDAG;
361 delete SwiftError;
362}
363
364void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
365 if (OptLevel != CodeGenOptLevel::None)
366 AU.addRequired<AAResultsWrapperPass>();
367 AU.addRequired<GCModuleInfo>();
368 AU.addRequired<StackProtector>();
369 AU.addPreserved<GCModuleInfo>();
370 AU.addRequired<TargetLibraryInfoWrapperPass>();
371 AU.addRequired<TargetTransformInfoWrapperPass>();
372 AU.addRequired<AssumptionCacheTracker>();
373 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
374 AU.addRequired<BranchProbabilityInfoWrapperPass>();
375 AU.addRequired<ProfileSummaryInfoWrapperPass>();
376 // AssignmentTrackingAnalysis only runs if assignment tracking is enabled for
377 // the module.
378 AU.addRequired<AssignmentTrackingAnalysis>();
379 AU.addPreserved<AssignmentTrackingAnalysis>();
380 if (OptLevel != CodeGenOptLevel::None)
381 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
382 MachineFunctionPass::getAnalysisUsage(AU);
383}
384
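// Record in MachineModuleInfo whether this function defines or uses any
// floating-point values. This is only tracked for MSVC environments.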
385static void computeUsesMSVCFloatingPoint(const Triple &TT, const Function &F,
386 MachineModuleInfo &MMI) {
387 // Only needed for MSVC
388 if (!TT.isWindowsMSVCEnvironment())
389 return;
390
391 // If it's already set, nothing to do.
392 if (MMI.usesMSVCFloatingPoint())
393 return;
394
395 for (const Instruction &I : instructions(F)) {
396 if (I.getType()->isFPOrFPVectorTy()) {
397 MMI.setUsesMSVCFloatingPoint(true);
398 return;
399 }
400 for (const auto &Op : I.operands()) {
401 if (Op->getType()->isFPOrFPVectorTy()) {
402 MMI.setUsesMSVCFloatingPoint(true);
403 return;
404 }
405 }
406 }
407}
408
409bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
410 // If we already selected that function, we do not need to run SDISel.
411 if (mf.getProperties().hasProperty(
412 P: MachineFunctionProperties::Property::Selected))
413 return false;
414 // Do some sanity-checking on the command-line options.
415 assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
416 "-fast-isel-abort > 0 requires -fast-isel");
417
418 const Function &Fn = mf.getFunction();
419 MF = &mf;
420
421#ifndef NDEBUG
422 StringRef FuncName = Fn.getName();
423 MatchFilterFuncName = isFunctionInPrintList(FunctionName: FuncName);
424#else
425 (void)MatchFilterFuncName;
426#endif
427
428 // Decide what flavour of variable location debug-info will be used, before
429 // we change the optimisation level.
430 bool InstrRef = mf.shouldUseDebugInstrRef();
431 mf.setUseDebugInstrRef(InstrRef);
432
433 // Reset the target options before resetting the optimization
434 // level below.
435 // FIXME: This is a horrible hack and should be processed via
436 // codegen looking at the optimization level explicitly when
437 // it wants to look at it.
438 TM.resetTargetOptions(F: Fn);
439 // Reset OptLevel to None for optnone functions.
440 CodeGenOptLevel NewOptLevel = OptLevel;
441 if (OptLevel != CodeGenOptLevel::None && skipFunction(F: Fn))
442 NewOptLevel = CodeGenOptLevel::None;
443 OptLevelChanger OLC(*this, NewOptLevel);
444
445 TII = MF->getSubtarget().getInstrInfo();
446 TLI = MF->getSubtarget().getTargetLowering();
447 RegInfo = &MF->getRegInfo();
448 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F: Fn);
449 GFI = Fn.hasGC() ? &getAnalysis<GCModuleInfo>().getFunctionInfo(F: Fn) : nullptr;
450 ORE = std::make_unique<OptimizationRemarkEmitter>(args: &Fn);
451 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F&: mf.getFunction());
452 auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
453 BlockFrequencyInfo *BFI = nullptr;
454 if (PSI && PSI->hasProfileSummary() && OptLevel != CodeGenOptLevel::None)
455 BFI = &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
456
457 FunctionVarLocs const *FnVarLocs = nullptr;
458 if (isAssignmentTrackingEnabled(M: *Fn.getParent()))
459 FnVarLocs = getAnalysis<AssignmentTrackingAnalysis>().getResults();
460
461 ISEL_DUMP(dbgs() << "\n\n\n=== " << FuncName << "\n");
462
463 UniformityInfo *UA = nullptr;
464 if (auto *UAPass = getAnalysisIfAvailable<UniformityInfoWrapperPass>())
465 UA = &UAPass->getUniformityInfo();
466 CurDAG->init(NewMF&: *MF, NewORE&: *ORE, PassPtr: this, LibraryInfo: LibInfo, UA, PSIin: PSI, BFIin: BFI, FnVarLocs);
467 FuncInfo->set(Fn, MF&: *MF, DAG: CurDAG);
468 SwiftError->setFunction(*MF);
469
 // Now get the optional analyses if we want to.
471 // This is based on the possibly changed OptLevel (after optnone is taken
472 // into account). That's unfortunate but OK because it just means we won't
473 // ask for passes that have been required anyway.
474
475 if (UseMBPI && OptLevel != CodeGenOptLevel::None)
476 FuncInfo->BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
477 else
478 FuncInfo->BPI = nullptr;
479
480 if (OptLevel != CodeGenOptLevel::None)
481 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
482 else
483 AA = nullptr;
484
485 SDB->init(gfi: GFI, AA, AC, li: LibInfo);
486
487 MF->setHasInlineAsm(false);
488
489 FuncInfo->SplitCSR = false;
490
491 // We split CSR if the target supports it for the given function
492 // and the function has only return exits.
493 if (OptLevel != CodeGenOptLevel::None && TLI->supportSplitCSR(MF)) {
494 FuncInfo->SplitCSR = true;
495
496 // Collect all the return blocks.
497 for (const BasicBlock &BB : Fn) {
498 if (!succ_empty(BB: &BB))
499 continue;
500
501 const Instruction *Term = BB.getTerminator();
502 if (isa<UnreachableInst>(Val: Term) || isa<ReturnInst>(Val: Term))
503 continue;
504
 // Bail out if the exit block is neither a Return nor an Unreachable.
506 FuncInfo->SplitCSR = false;
507 break;
508 }
509 }
510
511 MachineBasicBlock *EntryMBB = &MF->front();
512 if (FuncInfo->SplitCSR)
513 // This performs initialization so lowering for SplitCSR will be correct.
514 TLI->initializeSplitCSR(Entry: EntryMBB);
515
516 SelectAllBasicBlocks(Fn);
517 if (FastISelFailed && EnableFastISelFallbackReport) {
518 DiagnosticInfoISelFallback DiagFallback(Fn);
519 Fn.getContext().diagnose(DI: DiagFallback);
520 }
521
522 // Replace forward-declared registers with the registers containing
523 // the desired value.
524 // Note: it is important that this happens **before** the call to
525 // EmitLiveInCopies, since implementations can skip copies of unused
526 // registers. If we don't apply the reg fixups before, some registers may
527 // appear as unused and will be skipped, resulting in bad MI.
528 MachineRegisterInfo &MRI = MF->getRegInfo();
529 for (DenseMap<Register, Register>::iterator I = FuncInfo->RegFixups.begin(),
530 E = FuncInfo->RegFixups.end();
531 I != E; ++I) {
532 Register From = I->first;
533 Register To = I->second;
534 // If To is also scheduled to be replaced, find what its ultimate
535 // replacement is.
536 while (true) {
537 DenseMap<Register, Register>::iterator J = FuncInfo->RegFixups.find(Val: To);
538 if (J == E)
539 break;
540 To = J->second;
541 }
542 // Make sure the new register has a sufficiently constrained register class.
543 if (From.isVirtual() && To.isVirtual())
544 MRI.constrainRegClass(Reg: To, RC: MRI.getRegClass(Reg: From));
545 // Replace it.
546
547 // Replacing one register with another won't touch the kill flags.
548 // We need to conservatively clear the kill flags as a kill on the old
549 // register might dominate existing uses of the new register.
550 if (!MRI.use_empty(RegNo: To))
551 MRI.clearKillFlags(Reg: From);
552 MRI.replaceRegWith(FromReg: From, ToReg: To);
553 }
554
555 // If the first basic block in the function has live ins that need to be
556 // copied into vregs, emit the copies into the top of the block before
557 // emitting the code for the block.
558 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
559 RegInfo->EmitLiveInCopies(EntryMBB, TRI, TII: *TII);
560
561 // Insert copies in the entry block and the return blocks.
562 if (FuncInfo->SplitCSR) {
563 SmallVector<MachineBasicBlock*, 4> Returns;
564 // Collect all the return blocks.
565 for (MachineBasicBlock &MBB : mf) {
566 if (!MBB.succ_empty())
567 continue;
568
569 MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
570 if (Term != MBB.end() && Term->isReturn()) {
571 Returns.push_back(Elt: &MBB);
572 continue;
573 }
574 }
575 TLI->insertCopiesSplitCSR(Entry: EntryMBB, Exits: Returns);
576 }
577
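 // If there are argument DBG_VALUEs, build a map from live-in physical
 // registers to the virtual registers they are copied into; it is used below
 // to extend argument debug info to those vreg copies.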
578 DenseMap<unsigned, unsigned> LiveInMap;
579 if (!FuncInfo->ArgDbgValues.empty())
580 for (std::pair<unsigned, unsigned> LI : RegInfo->liveins())
581 if (LI.second)
582 LiveInMap.insert(KV: LI);
583
584 // Insert DBG_VALUE instructions for function arguments to the entry block.
585 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
586 MachineInstr *MI = FuncInfo->ArgDbgValues[e - i - 1];
587 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
588 "Function parameters should not be described by DBG_VALUE_LIST.");
589 bool hasFI = MI->getDebugOperand(Index: 0).isFI();
590 Register Reg =
591 hasFI ? TRI.getFrameRegister(MF: *MF) : MI->getDebugOperand(Index: 0).getReg();
592 if (Reg.isPhysical())
593 EntryMBB->insert(I: EntryMBB->begin(), MI);
594 else {
595 MachineInstr *Def = RegInfo->getVRegDef(Reg);
596 if (Def) {
597 MachineBasicBlock::iterator InsertPos = Def;
598 // FIXME: VR def may not be in entry block.
599 Def->getParent()->insert(I: std::next(x: InsertPos), MI);
600 } else
601 LLVM_DEBUG(dbgs() << "Dropping debug info for dead vreg"
602 << Register::virtReg2Index(Reg) << "\n");
603 }
604
 // Don't try to extend through copies in instruction referencing mode.
606 if (InstrRef)
607 continue;
608
609 // If Reg is live-in then update debug info to track its copy in a vreg.
610 DenseMap<unsigned, unsigned>::iterator LDI = LiveInMap.find(Val: Reg);
611 if (LDI != LiveInMap.end()) {
612 assert(!hasFI && "There's no handling of frame pointer updating here yet "
613 "- add if needed");
614 MachineInstr *Def = RegInfo->getVRegDef(Reg: LDI->second);
615 MachineBasicBlock::iterator InsertPos = Def;
616 const MDNode *Variable = MI->getDebugVariable();
617 const MDNode *Expr = MI->getDebugExpression();
618 DebugLoc DL = MI->getDebugLoc();
619 bool IsIndirect = MI->isIndirectDebugValue();
620 if (IsIndirect)
621 assert(MI->getDebugOffset().getImm() == 0 &&
622 "DBG_VALUE with nonzero offset");
623 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
624 "Expected inlined-at fields to agree");
625 assert(MI->getOpcode() != TargetOpcode::DBG_VALUE_LIST &&
626 "Didn't expect to see a DBG_VALUE_LIST here");
627 // Def is never a terminator here, so it is ok to increment InsertPos.
628 BuildMI(BB&: *EntryMBB, I: ++InsertPos, DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE),
629 IsIndirect, Reg: LDI->second, Variable, Expr);
630
631 // If this vreg is directly copied into an exported register then
 // that COPY instruction also needs a DBG_VALUE, if it is the only
633 // user of LDI->second.
634 MachineInstr *CopyUseMI = nullptr;
635 for (MachineRegisterInfo::use_instr_iterator
636 UI = RegInfo->use_instr_begin(RegNo: LDI->second),
637 E = RegInfo->use_instr_end(); UI != E; ) {
638 MachineInstr *UseMI = &*(UI++);
639 if (UseMI->isDebugValue()) continue;
640 if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
641 CopyUseMI = UseMI; continue;
642 }
643 // Otherwise this is another use or second copy use.
644 CopyUseMI = nullptr; break;
645 }
646 if (CopyUseMI &&
647 TRI.getRegSizeInBits(Reg: LDI->second, MRI) ==
648 TRI.getRegSizeInBits(Reg: CopyUseMI->getOperand(i: 0).getReg(), MRI)) {
649 // Use MI's debug location, which describes where Variable was
650 // declared, rather than whatever is attached to CopyUseMI.
651 MachineInstr *NewMI =
652 BuildMI(MF&: *MF, DL, MCID: TII->get(Opcode: TargetOpcode::DBG_VALUE), IsIndirect,
653 Reg: CopyUseMI->getOperand(i: 0).getReg(), Variable, Expr);
654 MachineBasicBlock::iterator Pos = CopyUseMI;
655 EntryMBB->insertAfter(I: Pos, MI: NewMI);
656 }
657 }
658 }
659
660 // For debug-info, in instruction referencing mode, we need to perform some
 // post-isel maintenance.
662 if (MF->useDebugInstrRef())
663 MF->finalizeDebugInstrRefs();
664
665 // Determine if there are any calls in this machine function.
666 MachineFrameInfo &MFI = MF->getFrameInfo();
667 for (const auto &MBB : *MF) {
668 if (MFI.hasCalls() && MF->hasInlineAsm())
669 break;
670
671 for (const auto &MI : MBB) {
672 const MCInstrDesc &MCID = TII->get(Opcode: MI.getOpcode());
673 if ((MCID.isCall() && !MCID.isReturn()) ||
674 MI.isStackAligningInlineAsm()) {
675 MFI.setHasCalls(true);
676 }
677 if (MI.isInlineAsm()) {
678 MF->setHasInlineAsm(true);
679 }
680 }
681 }
682
683 // Determine if there is a call to setjmp in the machine function.
684 MF->setExposesReturnsTwice(Fn.callsFunctionThatReturnsTwice());
685
 // Determine if floating point is used for MSVC.
687 computeUsesMSVCFloatingPoint(TT: TM.getTargetTriple(), F: Fn, MMI&: MF->getMMI());
688
689 // Release function-specific state. SDB and CurDAG are already cleared
690 // at this point.
691 FuncInfo->clear();
692
693 ISEL_DUMP(dbgs() << "*** MachineFunction at end of ISel ***\n");
694 ISEL_DUMP(MF->print(dbgs()));
695
696 return true;
697}
698
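/// Report a FastISel selection failure as an optimization remark. If
/// ShouldAbort is set, raise a fatal error with the remark's message instead.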
699static void reportFastISelFailure(MachineFunction &MF,
700 OptimizationRemarkEmitter &ORE,
701 OptimizationRemarkMissed &R,
702 bool ShouldAbort) {
703 // Print the function name explicitly if we don't have a debug location (which
704 // makes the diagnostic less useful) or if we're going to emit a raw error.
705 if (!R.getLocation().isValid() || ShouldAbort)
706 R << (" (in function: " + MF.getName() + ")").str();
707
708 if (ShouldAbort)
709 report_fatal_error(reason: Twine(R.getMsg()));
710
711 ORE.emit(OptDiag&: R);
712 LLVM_DEBUG(dbgs() << R.getMsg() << "\n");
713}
714
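/// Lower the IR instructions in the range [Begin, End) into the current
/// SelectionDAG and emit machine code for the resulting DAG. Lowering stops
/// early if a tail call is emitted, and HadTailCall is set accordingly.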
715void SelectionDAGISel::SelectBasicBlock(BasicBlock::const_iterator Begin,
716 BasicBlock::const_iterator End,
717 bool &HadTailCall) {
718 // Allow creating illegal types during DAG building for the basic block.
719 CurDAG->NewNodesMustHaveLegalTypes = false;
720
721 // Lower the instructions. If a call is emitted as a tail call, cease emitting
722 // nodes for this block. If an instruction is elided, don't emit it, but do
723 // handle any debug-info attached to it.
724 for (BasicBlock::const_iterator I = Begin; I != End && !SDB->HasTailCall; ++I) {
725 if (!ElidedArgCopyInstrs.count(Ptr: &*I))
726 SDB->visit(I: *I);
727 else
728 SDB->visitDbgInfo(I: *I);
729 }
730
731 // Make sure the root of the DAG is up-to-date.
732 CurDAG->setRoot(SDB->getControlRoot());
733 HadTailCall = SDB->HasTailCall;
734 SDB->resolveOrClearDbgInfo();
735 SDB->clear();
736
737 // Final step, emit the lowered DAG as machine code.
738 CodeGenAndEmitDAG();
739}
740
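/// Walk the DAG along chain edges and, for every CopyToReg into a virtual
/// register with an integer source, record the source's known bits and number
/// of sign bits so this live-out information can be reused by other blocks.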
741void SelectionDAGISel::ComputeLiveOutVRegInfo() {
742 SmallPtrSet<SDNode *, 16> Added;
743 SmallVector<SDNode*, 128> Worklist;
744
745 Worklist.push_back(Elt: CurDAG->getRoot().getNode());
746 Added.insert(Ptr: CurDAG->getRoot().getNode());
747
748 KnownBits Known;
749
750 do {
751 SDNode *N = Worklist.pop_back_val();
752
 // Add all chain operands to the worklist.
754 for (const SDValue &Op : N->op_values())
755 if (Op.getValueType() == MVT::Other && Added.insert(Ptr: Op.getNode()).second)
756 Worklist.push_back(Elt: Op.getNode());
757
758 // If this is a CopyToReg with a vreg dest, process it.
759 if (N->getOpcode() != ISD::CopyToReg)
760 continue;
761
762 unsigned DestReg = cast<RegisterSDNode>(Val: N->getOperand(Num: 1))->getReg();
763 if (!Register::isVirtualRegister(Reg: DestReg))
764 continue;
765
766 // Ignore non-integer values.
767 SDValue Src = N->getOperand(Num: 2);
768 EVT SrcVT = Src.getValueType();
769 if (!SrcVT.isInteger())
770 continue;
771
772 unsigned NumSignBits = CurDAG->ComputeNumSignBits(Op: Src);
773 Known = CurDAG->computeKnownBits(Op: Src);
774 FuncInfo->AddLiveOutRegInfo(Reg: DestReg, NumSignBits, Known);
775 } while (!Worklist.empty());
776}
777
778void SelectionDAGISel::CodeGenAndEmitDAG() {
779 StringRef GroupName = "sdag";
780 StringRef GroupDescription = "Instruction Selection and Scheduling";
781 std::string BlockName;
782 bool MatchFilterBB = false; (void)MatchFilterBB;
783#ifndef NDEBUG
784 TargetTransformInfo &TTI =
785 getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F: *FuncInfo->Fn);
786#endif
787
 // Pre-type legalization allows creation of any node types.
789 CurDAG->NewNodesMustHaveLegalTypes = false;
790
791#ifndef NDEBUG
792 MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
793 FilterDAGBasicBlockName ==
794 FuncInfo->MBB->getBasicBlock()->getName());
795#endif
796#ifdef NDEBUG
797 if (ViewDAGCombine1 || ViewLegalizeTypesDAGs || ViewDAGCombineLT ||
798 ViewLegalizeDAGs || ViewDAGCombine2 || ViewISelDAGs || ViewSchedDAGs ||
799 ViewSUnitDAGs)
800#endif
801 {
802 BlockName =
803 (MF->getName() + ":" + FuncInfo->MBB->getBasicBlock()->getName()).str();
804 }
805 ISEL_DUMP(dbgs() << "\nInitial selection DAG: "
806 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
807 << "'\n";
808 CurDAG->dump());
809
810#ifndef NDEBUG
811 if (TTI.hasBranchDivergence())
812 CurDAG->VerifyDAGDivergence();
813#endif
814
815 if (ViewDAGCombine1 && MatchFilterBB)
816 CurDAG->viewGraph(Title: "dag-combine1 input for " + BlockName);
817
818 // Run the DAG combiner in pre-legalize mode.
819 {
820 NamedRegionTimer T("combine1", "DAG Combining 1", GroupName,
821 GroupDescription, TimePassesIsEnabled);
822 CurDAG->Combine(Level: BeforeLegalizeTypes, AA, OptLevel);
823 }
824
825 ISEL_DUMP(dbgs() << "\nOptimized lowered selection DAG: "
826 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
827 << "'\n";
828 CurDAG->dump());
829
830#ifndef NDEBUG
831 if (TTI.hasBranchDivergence())
832 CurDAG->VerifyDAGDivergence();
833#endif
834
835 // Second step, hack on the DAG until it only uses operations and types that
836 // the target supports.
837 if (ViewLegalizeTypesDAGs && MatchFilterBB)
838 CurDAG->viewGraph(Title: "legalize-types input for " + BlockName);
839
840 bool Changed;
841 {
842 NamedRegionTimer T("legalize_types", "Type Legalization", GroupName,
843 GroupDescription, TimePassesIsEnabled);
844 Changed = CurDAG->LegalizeTypes();
845 }
846
847 ISEL_DUMP(dbgs() << "\nType-legalized selection DAG: "
848 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
849 << "'\n";
850 CurDAG->dump());
851
852#ifndef NDEBUG
853 if (TTI.hasBranchDivergence())
854 CurDAG->VerifyDAGDivergence();
855#endif
856
857 // Only allow creation of legal node types.
858 CurDAG->NewNodesMustHaveLegalTypes = true;
859
860 if (Changed) {
861 if (ViewDAGCombineLT && MatchFilterBB)
862 CurDAG->viewGraph(Title: "dag-combine-lt input for " + BlockName);
863
864 // Run the DAG combiner in post-type-legalize mode.
865 {
866 NamedRegionTimer T("combine_lt", "DAG Combining after legalize types",
867 GroupName, GroupDescription, TimePassesIsEnabled);
868 CurDAG->Combine(Level: AfterLegalizeTypes, AA, OptLevel);
869 }
870
871 ISEL_DUMP(dbgs() << "\nOptimized type-legalized selection DAG: "
872 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
873 << "'\n";
874 CurDAG->dump());
875
876#ifndef NDEBUG
877 if (TTI.hasBranchDivergence())
878 CurDAG->VerifyDAGDivergence();
879#endif
880 }
881
882 {
883 NamedRegionTimer T("legalize_vec", "Vector Legalization", GroupName,
884 GroupDescription, TimePassesIsEnabled);
885 Changed = CurDAG->LegalizeVectors();
886 }
887
888 if (Changed) {
889 ISEL_DUMP(dbgs() << "\nVector-legalized selection DAG: "
890 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
891 << "'\n";
892 CurDAG->dump());
893
894#ifndef NDEBUG
895 if (TTI.hasBranchDivergence())
896 CurDAG->VerifyDAGDivergence();
897#endif
898
899 {
900 NamedRegionTimer T("legalize_types2", "Type Legalization 2", GroupName,
901 GroupDescription, TimePassesIsEnabled);
902 CurDAG->LegalizeTypes();
903 }
904
905 ISEL_DUMP(dbgs() << "\nVector/type-legalized selection DAG: "
906 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
907 << "'\n";
908 CurDAG->dump());
909
910#ifndef NDEBUG
911 if (TTI.hasBranchDivergence())
912 CurDAG->VerifyDAGDivergence();
913#endif
914
915 if (ViewDAGCombineLT && MatchFilterBB)
916 CurDAG->viewGraph(Title: "dag-combine-lv input for " + BlockName);
917
 // Run the DAG combiner in post-vector-legalize mode.
919 {
920 NamedRegionTimer T("combine_lv", "DAG Combining after legalize vectors",
921 GroupName, GroupDescription, TimePassesIsEnabled);
922 CurDAG->Combine(Level: AfterLegalizeVectorOps, AA, OptLevel);
923 }
924
925 ISEL_DUMP(dbgs() << "\nOptimized vector-legalized selection DAG: "
926 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
927 << "'\n";
928 CurDAG->dump());
929
930#ifndef NDEBUG
931 if (TTI.hasBranchDivergence())
932 CurDAG->VerifyDAGDivergence();
933#endif
934 }
935
936 if (ViewLegalizeDAGs && MatchFilterBB)
937 CurDAG->viewGraph(Title: "legalize input for " + BlockName);
938
939 {
940 NamedRegionTimer T("legalize", "DAG Legalization", GroupName,
941 GroupDescription, TimePassesIsEnabled);
942 CurDAG->Legalize();
943 }
944
945 ISEL_DUMP(dbgs() << "\nLegalized selection DAG: "
946 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
947 << "'\n";
948 CurDAG->dump());
949
950#ifndef NDEBUG
951 if (TTI.hasBranchDivergence())
952 CurDAG->VerifyDAGDivergence();
953#endif
954
955 if (ViewDAGCombine2 && MatchFilterBB)
956 CurDAG->viewGraph(Title: "dag-combine2 input for " + BlockName);
957
958 // Run the DAG combiner in post-legalize mode.
959 {
960 NamedRegionTimer T("combine2", "DAG Combining 2", GroupName,
961 GroupDescription, TimePassesIsEnabled);
962 CurDAG->Combine(Level: AfterLegalizeDAG, AA, OptLevel);
963 }
964
965 ISEL_DUMP(dbgs() << "\nOptimized legalized selection DAG: "
966 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
967 << "'\n";
968 CurDAG->dump());
969
970#ifndef NDEBUG
971 if (TTI.hasBranchDivergence())
972 CurDAG->VerifyDAGDivergence();
973#endif
974
975 if (OptLevel != CodeGenOptLevel::None)
976 ComputeLiveOutVRegInfo();
977
978 if (ViewISelDAGs && MatchFilterBB)
979 CurDAG->viewGraph(Title: "isel input for " + BlockName);
980
981 // Third, instruction select all of the operations to machine code, adding the
982 // code to the MachineBasicBlock.
983 {
984 NamedRegionTimer T("isel", "Instruction Selection", GroupName,
985 GroupDescription, TimePassesIsEnabled);
986 DoInstructionSelection();
987 }
988
989 ISEL_DUMP(dbgs() << "\nSelected selection DAG: "
990 << printMBBReference(*FuncInfo->MBB) << " '" << BlockName
991 << "'\n";
992 CurDAG->dump());
993
994 if (ViewSchedDAGs && MatchFilterBB)
995 CurDAG->viewGraph(Title: "scheduler input for " + BlockName);
996
997 // Schedule machine code.
998 ScheduleDAGSDNodes *Scheduler = CreateScheduler();
999 {
1000 NamedRegionTimer T("sched", "Instruction Scheduling", GroupName,
1001 GroupDescription, TimePassesIsEnabled);
1002 Scheduler->Run(dag: CurDAG, bb: FuncInfo->MBB);
1003 }
1004
1005 if (ViewSUnitDAGs && MatchFilterBB)
1006 Scheduler->viewGraph();
1007
1008 // Emit machine code to BB. This can change 'BB' to the last block being
1009 // inserted into.
1010 MachineBasicBlock *FirstMBB = FuncInfo->MBB, *LastMBB;
1011 {
1012 NamedRegionTimer T("emit", "Instruction Creation", GroupName,
1013 GroupDescription, TimePassesIsEnabled);
1014
1015 // FuncInfo->InsertPt is passed by reference and set to the end of the
1016 // scheduled instructions.
1017 LastMBB = FuncInfo->MBB = Scheduler->EmitSchedule(InsertPos&: FuncInfo->InsertPt);
1018 }
1019
1020 // If the block was split, make sure we update any references that are used to
1021 // update PHI nodes later on.
1022 if (FirstMBB != LastMBB)
1023 SDB->UpdateSplitBlock(First: FirstMBB, Last: LastMBB);
1024
1025 // Free the scheduler state.
1026 {
1027 NamedRegionTimer T("cleanup", "Instruction Scheduling Cleanup", GroupName,
1028 GroupDescription, TimePassesIsEnabled);
1029 delete Scheduler;
1030 }
1031
1032 // Free the SelectionDAG state, now that we're finished with it.
1033 CurDAG->clear();
1034}
1035
1036namespace {
1037
1038/// ISelUpdater - helper class to handle updates of the instruction selection
1039/// graph.
1040class ISelUpdater : public SelectionDAG::DAGUpdateListener {
1041 SelectionDAG::allnodes_iterator &ISelPosition;
1042
1043public:
1044 ISelUpdater(SelectionDAG &DAG, SelectionDAG::allnodes_iterator &isp)
1045 : SelectionDAG::DAGUpdateListener(DAG), ISelPosition(isp) {}
1046
1047 /// NodeDeleted - Handle nodes deleted from the graph. If the node being
1048 /// deleted is the current ISelPosition node, update ISelPosition.
1049 ///
1050 void NodeDeleted(SDNode *N, SDNode *E) override {
1051 if (ISelPosition == SelectionDAG::allnodes_iterator(N))
1052 ++ISelPosition;
1053 }
1054
1055 /// NodeInserted - Handle new nodes inserted into the graph: propagate
1056 /// metadata from root nodes that also applies to new nodes, in case the root
1057 /// is later deleted.
1058 void NodeInserted(SDNode *N) override {
1059 SDNode *CurNode = &*ISelPosition;
1060 if (MDNode *MD = DAG.getPCSections(Node: CurNode))
1061 DAG.addPCSections(Node: N, MD);
1062 }
1063};
1064
1065} // end anonymous namespace
1066
1067// This function is used to enforce the topological node id property
1068// leveraged during instruction selection. Before the selection process all
1069// nodes are given a non-negative id such that all nodes have a greater id than
1070// their operands. As this holds transitively we can prune checks that a node N
// is a predecessor of another node M by not recursively checking through M's
1072// operands if N's ID is larger than M's ID. This significantly improves
1073// performance of various legality checks (e.g. IsLegalToFold / UpdateChains).
1074
1075// However, when we fuse multiple nodes into a single node during the
1076// selection we may induce a predecessor relationship between inputs and
1077// outputs of distinct nodes being merged, violating the topological property.
1078// Should a fused node have a successor which has yet to be selected,
1079// our legality checks would be incorrect. To avoid this we mark all unselected
1080// successor nodes, i.e. id != -1, as invalid for pruning by bit-negating (x =>
1081// (-(x+1))) the ids and modify our pruning check to ignore negative Ids of M.
1082// We use bit-negation to more clearly enforce that node id -1 can only be
// achieved by selected nodes. As the conversion is reversible to the original
1084// Id, topological pruning can still be leveraged when looking for unselected
1085// nodes. This method is called internally in all ISel replacement related
1086// functions.
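//
// For example, an unselected node with id 5 is invalidated to -(5 + 1) = -6;
// getUninvalidatedNodeId() recovers the original 5, while id -1 remains
// reserved for nodes that have already been selected.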
1087void SelectionDAGISel::EnforceNodeIdInvariant(SDNode *Node) {
1088 SmallVector<SDNode *, 4> Nodes;
1089 Nodes.push_back(Elt: Node);
1090
1091 while (!Nodes.empty()) {
1092 SDNode *N = Nodes.pop_back_val();
1093 for (auto *U : N->uses()) {
1094 auto UId = U->getNodeId();
1095 if (UId > 0) {
1096 InvalidateNodeId(N: U);
1097 Nodes.push_back(Elt: U);
1098 }
1099 }
1100 }
1101}
1102
1103// InvalidateNodeId - As explained in EnforceNodeIdInvariant, mark a
1104// NodeId with the equivalent node id which is invalid for topological
1105// pruning.
1106void SelectionDAGISel::InvalidateNodeId(SDNode *N) {
1107 int InvalidId = -(N->getNodeId() + 1);
1108 N->setNodeId(InvalidId);
1109}
1110
1111// getUninvalidatedNodeId - get original uninvalidated node id.
1112int SelectionDAGISel::getUninvalidatedNodeId(SDNode *N) {
1113 int Id = N->getNodeId();
1114 if (Id < -1)
1115 return -(Id + 1);
1116 return Id;
1117}
1118
1119void SelectionDAGISel::DoInstructionSelection() {
1120 LLVM_DEBUG(dbgs() << "===== Instruction selection begins: "
1121 << printMBBReference(*FuncInfo->MBB) << " '"
1122 << FuncInfo->MBB->getName() << "'\n");
1123
1124 PreprocessISelDAG();
1125
1126 // Select target instructions for the DAG.
1127 {
1128 // Number all nodes with a topological order and set DAGSize.
1129 DAGSize = CurDAG->AssignTopologicalOrder();
1130
1131 // Create a dummy node (which is not added to allnodes), that adds
1132 // a reference to the root node, preventing it from being deleted,
1133 // and tracking any changes of the root.
1134 HandleSDNode Dummy(CurDAG->getRoot());
1135 SelectionDAG::allnodes_iterator ISelPosition (CurDAG->getRoot().getNode());
1136 ++ISelPosition;
1137
1138 // Make sure that ISelPosition gets properly updated when nodes are deleted
1139 // in calls made from this function. New nodes inherit relevant metadata.
1140 ISelUpdater ISU(*CurDAG, ISelPosition);
1141
1142 // The AllNodes list is now topological-sorted. Visit the
1143 // nodes by starting at the end of the list (the root of the
 // graph) and proceeding back toward the beginning (the entry
1145 // node).
1146 while (ISelPosition != CurDAG->allnodes_begin()) {
1147 SDNode *Node = &*--ISelPosition;
1148 // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
1149 // but there are currently some corner cases that it misses. Also, this
1150 // makes it theoretically possible to disable the DAGCombiner.
1151 if (Node->use_empty())
1152 continue;
1153
1154#ifndef NDEBUG
1155 SmallVector<SDNode *, 4> Nodes;
1156 Nodes.push_back(Elt: Node);
1157
1158 while (!Nodes.empty()) {
1159 auto N = Nodes.pop_back_val();
1160 if (N->getOpcode() == ISD::TokenFactor || N->getNodeId() < 0)
1161 continue;
1162 for (const SDValue &Op : N->op_values()) {
1163 if (Op->getOpcode() == ISD::TokenFactor)
1164 Nodes.push_back(Elt: Op.getNode());
1165 else {
1166 // We rely on topological ordering of node ids for checking for
 // cycles when fusing nodes during selection. All unselected
 // successors of an already selected node should have a negative id.
 // This assertion will catch such cases. If this assertion triggers,
 // it is likely you are using DAG-level Value/Node replacement functions
1171 // (versus equivalent ISEL replacement) in backend-specific
1172 // selections. See comment in EnforceNodeIdInvariant for more
1173 // details.
1174 assert(Op->getNodeId() != -1 &&
1175 "Node has already selected predecessor node");
1176 }
1177 }
1178 }
1179#endif
1180
 // When we are using non-default rounding modes or FP exception behavior,
1182 // FP operations are represented by StrictFP pseudo-operations. For
1183 // targets that do not (yet) understand strict FP operations directly,
1184 // we convert them to normal FP opcodes instead at this point. This
1185 // will allow them to be handled by existing target-specific instruction
1186 // selectors.
1187 if (!TLI->isStrictFPEnabled() && Node->isStrictFPOpcode()) {
1188 // For some opcodes, we need to call TLI->getOperationAction using
1189 // the first operand type instead of the result type. Note that this
1190 // must match what SelectionDAGLegalize::LegalizeOp is doing.
1191 EVT ActionVT;
1192 switch (Node->getOpcode()) {
1193 case ISD::STRICT_SINT_TO_FP:
1194 case ISD::STRICT_UINT_TO_FP:
1195 case ISD::STRICT_LRINT:
1196 case ISD::STRICT_LLRINT:
1197 case ISD::STRICT_LROUND:
1198 case ISD::STRICT_LLROUND:
1199 case ISD::STRICT_FSETCC:
1200 case ISD::STRICT_FSETCCS:
1201 ActionVT = Node->getOperand(Num: 1).getValueType();
1202 break;
1203 default:
1204 ActionVT = Node->getValueType(ResNo: 0);
1205 break;
1206 }
1207 if (TLI->getOperationAction(Op: Node->getOpcode(), VT: ActionVT)
1208 == TargetLowering::Expand)
1209 Node = CurDAG->mutateStrictFPToFP(Node);
1210 }
1211
1212 LLVM_DEBUG(dbgs() << "\nISEL: Starting selection on root node: ";
1213 Node->dump(CurDAG));
1214
1215 Select(N: Node);
1216 }
1217
1218 CurDAG->setRoot(Dummy.getValue());
1219 }
1220
1221 LLVM_DEBUG(dbgs() << "\n===== Instruction selection ends:\n");
1222
1223 PostprocessISelDAG();
1224}
1225
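/// Return true if any user of the catchpad retrieves its exception pointer or
/// exception code via the llvm.eh.exceptionpointer or llvm.eh.exceptioncode
/// intrinsics.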
1226static bool hasExceptionPointerOrCodeUser(const CatchPadInst *CPI) {
1227 for (const User *U : CPI->users()) {
1228 if (const IntrinsicInst *EHPtrCall = dyn_cast<IntrinsicInst>(Val: U)) {
1229 Intrinsic::ID IID = EHPtrCall->getIntrinsicID();
1230 if (IID == Intrinsic::eh_exceptionpointer ||
1231 IID == Intrinsic::eh_exceptioncode)
1232 return true;
1233 }
1234 }
1235 return false;
1236}
1237
1238// wasm.landingpad.index intrinsic is for associating a landing pad index number
1239// with a catchpad instruction. Retrieve the landing pad index in the intrinsic
1240// and store the mapping in the function.
1241static void mapWasmLandingPadIndex(MachineBasicBlock *MBB,
1242 const CatchPadInst *CPI) {
1243 MachineFunction *MF = MBB->getParent();
1244 // In case of single catch (...), we don't emit LSDA, so we don't need
1245 // this information.
1246 bool IsSingleCatchAllClause =
1247 CPI->arg_size() == 1 &&
1248 cast<Constant>(Val: CPI->getArgOperand(i: 0))->isNullValue();
 // catchpads for longjmp use an empty type list, e.g. catchpad within %0 []
1250 // and they don't need LSDA info
1251 bool IsCatchLongjmp = CPI->arg_size() == 0;
1252 if (!IsSingleCatchAllClause && !IsCatchLongjmp) {
1253 // Create a mapping from landing pad label to landing pad index.
1254 bool IntrFound = false;
1255 for (const User *U : CPI->users()) {
1256 if (const auto *Call = dyn_cast<IntrinsicInst>(Val: U)) {
1257 Intrinsic::ID IID = Call->getIntrinsicID();
1258 if (IID == Intrinsic::wasm_landingpad_index) {
1259 Value *IndexArg = Call->getArgOperand(i: 1);
1260 int Index = cast<ConstantInt>(Val: IndexArg)->getZExtValue();
1261 MF->setWasmLandingPadIndex(LPad: MBB, Index);
1262 IntrFound = true;
1263 break;
1264 }
1265 }
1266 }
1267 assert(IntrFound && "wasm.landingpad.index intrinsic not found!");
1268 (void)IntrFound;
1269 }
1270}
1271
1272/// PrepareEHLandingPad - Emit an EH_LABEL, set up live-in registers, and
1273/// do other setup for EH landing-pad blocks.
1274bool SelectionDAGISel::PrepareEHLandingPad() {
1275 MachineBasicBlock *MBB = FuncInfo->MBB;
1276 const Constant *PersonalityFn = FuncInfo->Fn->getPersonalityFn();
1277 const BasicBlock *LLVMBB = MBB->getBasicBlock();
1278 const TargetRegisterClass *PtrRC =
1279 TLI->getRegClassFor(VT: TLI->getPointerTy(DL: CurDAG->getDataLayout()));
1280
1281 auto Pers = classifyEHPersonality(Pers: PersonalityFn);
1282
1283 // Catchpads have one live-in register, which typically holds the exception
1284 // pointer or code.
1285 if (isFuncletEHPersonality(Pers)) {
1286 if (const auto *CPI = dyn_cast<CatchPadInst>(Val: LLVMBB->getFirstNonPHI())) {
1287 if (hasExceptionPointerOrCodeUser(CPI)) {
1288 // Get or create the virtual register to hold the pointer or code. Mark
1289 // the live in physreg and copy into the vreg.
1290 MCPhysReg EHPhysReg = TLI->getExceptionPointerRegister(PersonalityFn);
1291 assert(EHPhysReg && "target lacks exception pointer register");
1292 MBB->addLiveIn(PhysReg: EHPhysReg);
1293 unsigned VReg = FuncInfo->getCatchPadExceptionPointerVReg(CPI, RC: PtrRC);
1294 BuildMI(BB&: *MBB, I: FuncInfo->InsertPt, MIMD: SDB->getCurDebugLoc(),
1295 MCID: TII->get(Opcode: TargetOpcode::COPY), DestReg: VReg)
1296 .addReg(RegNo: EHPhysReg, flags: RegState::Kill);
1297 }
1298 }
1299 return true;
1300 }
1301
1302 // Add a label to mark the beginning of the landing pad. Deletion of the
1303 // landing pad can thus be detected via the MachineModuleInfo.
1304 MCSymbol *Label = MF->addLandingPad(LandingPad: MBB);
1305
1306 const MCInstrDesc &II = TII->get(Opcode: TargetOpcode::EH_LABEL);
1307 BuildMI(BB&: *MBB, I: FuncInfo->InsertPt, MIMD: SDB->getCurDebugLoc(), MCID: II)
1308 .addSym(Sym: Label);
1309
1310 // If the unwinder does not preserve all registers, ensure that the
1311 // function marks the clobbered registers as used.
1312 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
1313 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(MF: *MF))
1314 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
1315
1316 if (Pers == EHPersonality::Wasm_CXX) {
1317 if (const auto *CPI = dyn_cast<CatchPadInst>(Val: LLVMBB->getFirstNonPHI()))
1318 mapWasmLandingPadIndex(MBB, CPI);
1319 } else {
1320 // Assign the call site to the landing pad's begin label.
1321 MF->setCallSiteLandingPad(Sym: Label, Sites: SDB->LPadToCallSiteMap[MBB]);
1322 // Mark exception register as live in.
1323 if (unsigned Reg = TLI->getExceptionPointerRegister(PersonalityFn))
1324 FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(PhysReg: Reg, RC: PtrRC);
1325 // Mark exception selector register as live in.
1326 if (unsigned Reg = TLI->getExceptionSelectorRegister(PersonalityFn))
1327 FuncInfo->ExceptionSelectorVirtReg = MBB->addLiveIn(PhysReg: Reg, RC: PtrRC);
1328 }
1329
1330 return true;
1331}
1332
// Mark and report the IP-to-state range for each block when async exception
// handling (IsEHa) is in use.
1334void SelectionDAGISel::reportIPToStateForBlocks(MachineFunction *MF) {
1335 MachineModuleInfo &MMI = MF->getMMI();
1336 llvm::WinEHFuncInfo *EHInfo = MF->getWinEHFuncInfo();
1337 if (!EHInfo)
1338 return;
1339 for (auto MBBI = MF->begin(), E = MF->end(); MBBI != E; ++MBBI) {
1340 MachineBasicBlock *MBB = &*MBBI;
1341 const BasicBlock *BB = MBB->getBasicBlock();
1342 int State = EHInfo->BlockToStateMap[BB];
1343 if (BB->getFirstMayFaultInst()) {
 // Report the IP range only for blocks that contain a may-fault instruction.
1345 auto MBBb = MBB->getFirstNonPHI();
1346 MachineInstr *MIb = &*MBBb;
1347 if (MIb->isTerminator())
1348 continue;
1349
1350 // Insert EH Labels
1351 MCSymbol *BeginLabel = MMI.getContext().createTempSymbol();
1352 MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
1353 EHInfo->addIPToStateRange(State, InvokeBegin: BeginLabel, InvokeEnd: EndLabel);
1354 BuildMI(BB&: *MBB, I: MBBb, MIMD: SDB->getCurDebugLoc(),
1355 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1356 .addSym(Sym: BeginLabel);
1357 auto MBBe = MBB->instr_end();
1358 MachineInstr *MIe = &*(--MBBe);
 // Insert before the (possibly multiple) terminators.
1360 while (MIe->isTerminator())
1361 MIe = &*(--MBBe);
1362 ++MBBe;
1363 BuildMI(BB&: *MBB, I: MBBe, MIMD: SDB->getCurDebugLoc(),
1364 MCID: TII->get(Opcode: TargetOpcode::EH_LABEL))
1365 .addSym(Sym: EndLabel);
1366 }
1367 }
1368}
1369
1370/// isFoldedOrDeadInstruction - Return true if the specified instruction is
1371/// side-effect free and is either dead or folded into a generated instruction.
1372/// Return false if it needs to be emitted.
1373static bool isFoldedOrDeadInstruction(const Instruction *I,
1374 const FunctionLoweringInfo &FuncInfo) {
1375 return !I->mayWriteToMemory() && // Side-effecting instructions aren't folded.
1376 !I->isTerminator() && // Terminators aren't folded.
1377 !isa<DbgInfoIntrinsic>(Val: I) && // Debug instructions aren't folded.
1378 !I->isEHPad() && // EH pad instructions aren't folded.
1379 !FuncInfo.isExportedInst(V: I); // Exported instrs must be computed.
1380}
1381
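/// If the dbg.declare uses an entry-value expression on a function argument,
/// bind the variable to the argument's live-in physical register (adding a
/// DW_OP_deref since this is a declare) instead of a frame index. Returns
/// true if the location was recorded this way.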
1382static bool processIfEntryValueDbgDeclare(FunctionLoweringInfo &FuncInfo,
1383 const Value *Arg, DIExpression *Expr,
1384 DILocalVariable *Var,
1385 DebugLoc DbgLoc) {
1386 if (!Expr->isEntryValue() || !isa<Argument>(Val: Arg))
1387 return false;
1388
1389 auto ArgIt = FuncInfo.ValueMap.find(Val: Arg);
1390 if (ArgIt == FuncInfo.ValueMap.end())
1391 return false;
1392 Register ArgVReg = ArgIt->getSecond();
1393
1394 // Find the corresponding livein physical register to this argument.
1395 for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
1396 if (VirtReg == ArgVReg) {
1397 // Append an op deref to account for the fact that this is a dbg_declare.
1398 Expr = DIExpression::append(Expr, Ops: dwarf::DW_OP_deref);
1399 FuncInfo.MF->setVariableDbgInfo(Var, Expr, Reg: PhysReg, Loc: DbgLoc);
1400 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1401 << ", Expr=" << *Expr << ", MCRegister=" << PhysReg
1402 << ", DbgLoc=" << DbgLoc << "\n");
1403 return true;
1404 }
1405 return false;
1406}
1407
1408static bool processDbgDeclare(FunctionLoweringInfo &FuncInfo,
1409 const Value *Address, DIExpression *Expr,
1410 DILocalVariable *Var, DebugLoc DbgLoc) {
1411 if (!Address) {
1412 LLVM_DEBUG(dbgs() << "processDbgDeclares skipping " << *Var
1413 << " (bad address)\n");
1414 return false;
1415 }
1416
1417 if (processIfEntryValueDbgDeclare(FuncInfo, Arg: Address, Expr, Var, DbgLoc))
1418 return true;
1419
1420 MachineFunction *MF = FuncInfo.MF;
1421 const DataLayout &DL = MF->getDataLayout();
1422
1423 assert(Var && "Missing variable");
1424 assert(DbgLoc && "Missing location");
1425
1426 // Look through casts and constant offset GEPs. These mostly come from
1427 // inalloca.
1428 APInt Offset(DL.getTypeSizeInBits(Ty: Address->getType()), 0);
1429 Address = Address->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
1430
1431 // Check if the variable is a static alloca or a byval or inalloca
1432 // argument passed in memory. If it is not, then we will ignore this
1433 // intrinsic and handle this during isel like dbg.value.
1434 int FI = std::numeric_limits<int>::max();
1435 if (const auto *AI = dyn_cast<AllocaInst>(Val: Address)) {
1436 auto SI = FuncInfo.StaticAllocaMap.find(Val: AI);
1437 if (SI != FuncInfo.StaticAllocaMap.end())
1438 FI = SI->second;
1439 } else if (const auto *Arg = dyn_cast<Argument>(Val: Address))
1440 FI = FuncInfo.getArgumentFrameIndex(A: Arg);
1441
1442 if (FI == std::numeric_limits<int>::max())
1443 return false;
1444
1445 if (Offset.getBoolValue())
1446 Expr = DIExpression::prepend(Expr, Flags: DIExpression::ApplyOffset,
1447 Offset: Offset.getZExtValue());
1448
1449 LLVM_DEBUG(dbgs() << "processDbgDeclare: setVariableDbgInfo Var=" << *Var
1450 << ", Expr=" << *Expr << ", FI=" << FI
1451 << ", DbgLoc=" << DbgLoc << "\n");
1452 MF->setVariableDbgInfo(Var, Expr, Slot: FI, Loc: DbgLoc);
1453 return true;
1454}
1455
1456/// Collect llvm.dbg.declare information. This is done after argument lowering
1457/// in case the declarations refer to arguments.
1458static void processDbgDeclares(FunctionLoweringInfo &FuncInfo) {
1459 for (const auto &I : instructions(F: *FuncInfo.Fn)) {
1460 const auto *DI = dyn_cast<DbgDeclareInst>(Val: &I);
1461 if (DI && processDbgDeclare(FuncInfo, Address: DI->getAddress(), Expr: DI->getExpression(),
1462 Var: DI->getVariable(), DbgLoc: DI->getDebugLoc()))
1463 FuncInfo.PreprocessedDbgDeclares.insert(Ptr: DI);
1464
1465 for (const DPValue &DPV : I.getDbgValueRange()) {
1466 if (DPV.getType() == DPValue::LocationType::Declare &&
1467 processDbgDeclare(FuncInfo, Address: DPV.getVariableLocationOp(OpIdx: 0),
1468 Expr: DPV.getExpression(), Var: DPV.getVariable(),
1469 DbgLoc: DPV.getDebugLoc()))
1470 FuncInfo.PreprocessedDPVDeclares.insert(Ptr: &DPV);
1471 }
1472 }
1473}
1474
1475/// Collect single location variable information generated with assignment
1476/// tracking. This is done after argument lowering in case the declarations
1477/// refer to arguments.
1478static void processSingleLocVars(FunctionLoweringInfo &FuncInfo,
1479 FunctionVarLocs const *FnVarLocs) {
1480 for (auto It = FnVarLocs->single_locs_begin(),
1481 End = FnVarLocs->single_locs_end();
1482 It != End; ++It) {
1483 assert(!It->Values.hasArgList() && "Single loc variadic ops not supported");
1484 processDbgDeclare(FuncInfo, Address: It->Values.getVariableLocationOp(OpIdx: 0), Expr: It->Expr,
1485 Var: FnVarLocs->getDILocalVariable(ID: It->VariableID), DbgLoc: It->DL);
1486 }
1487}
1488
1489void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
1490 FastISelFailed = false;
1491 // Initialize the Fast-ISel state, if needed.
1492 FastISel *FastIS = nullptr;
1493 if (TM.Options.EnableFastISel) {
1494 LLVM_DEBUG(dbgs() << "Enabling fast-isel\n");
1495 FastIS = TLI->createFastISel(*FuncInfo, LibInfo);
1496 }
1497
1498 ReversePostOrderTraversal<const Function*> RPOT(&Fn);
1499
1500 // Lower arguments up front. An RPO iteration always visits the entry block
1501 // first.
1502 assert(*RPOT.begin() == &Fn.getEntryBlock());
1503 ++NumEntryBlocks;
1504
1505 // Set up FuncInfo for ISel. Entry blocks never have PHIs.
1506 FuncInfo->MBB = FuncInfo->MBBMap[&Fn.getEntryBlock()];
1507 FuncInfo->InsertPt = FuncInfo->MBB->begin();
1508
1509 CurDAG->setFunctionLoweringInfo(FuncInfo.get());
1510
1511 if (!FastIS) {
1512 LowerArguments(F: Fn);
1513 } else {
1514 // See if fast isel can lower the arguments.
1515 FastIS->startNewBlock();
1516 if (!FastIS->lowerArguments()) {
1517 FastISelFailed = true;
1518 // Fast isel failed to lower these arguments
1519 ++NumFastIselFailLowerArguments;
1520
1521 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1522 Fn.getSubprogram(),
1523 &Fn.getEntryBlock());
1524 R << "FastISel didn't lower all arguments: "
1525 << ore::NV("Prototype", Fn.getFunctionType());
1526 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 1);
1527
1528 // Use SelectionDAG argument lowering
1529 LowerArguments(F: Fn);
1530 CurDAG->setRoot(SDB->getControlRoot());
1531 SDB->clear();
1532 CodeGenAndEmitDAG();
1533 }
1534
1535 // If we inserted any instructions at the beginning, make a note of
1536 // where they are, so we can be sure to emit subsequent instructions
1537 // after them.
1538 if (FuncInfo->InsertPt != FuncInfo->MBB->begin())
1539 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1540 else
1541 FastIS->setLastLocalValue(nullptr);
1542 }
1543
1544 bool Inserted = SwiftError->createEntriesInEntryBlock(DbgLoc: SDB->getCurDebugLoc());
1545
1546 if (FastIS && Inserted)
1547 FastIS->setLastLocalValue(&*std::prev(x: FuncInfo->InsertPt));
1548
1549 if (isAssignmentTrackingEnabled(M: *Fn.getParent())) {
1550 assert(CurDAG->getFunctionVarLocs() &&
1551 "expected AssignmentTrackingAnalysis pass results");
1552 processSingleLocVars(FuncInfo&: *FuncInfo, FnVarLocs: CurDAG->getFunctionVarLocs());
1553 } else {
1554 processDbgDeclares(FuncInfo&: *FuncInfo);
1555 }
1556
1557 // Iterate over all basic blocks in the function.
1558 StackProtector &SP = getAnalysis<StackProtector>();
1559 for (const BasicBlock *LLVMBB : RPOT) {
1560 if (OptLevel != CodeGenOptLevel::None) {
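// FunctionLoweringInfo caches known-bits/sign-bit info for PHI results.
// That cached info is only trustworthy once every predecessor has been
// lowered in this RPO walk; otherwise it must be invalidated below.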
1561 bool AllPredsVisited = true;
1562 for (const BasicBlock *Pred : predecessors(BB: LLVMBB)) {
1563 if (!FuncInfo->VisitedBBs.count(Ptr: Pred)) {
1564 AllPredsVisited = false;
1565 break;
1566 }
1567 }
1568
1569 if (AllPredsVisited) {
1570 for (const PHINode &PN : LLVMBB->phis())
1571 FuncInfo->ComputePHILiveOutRegInfo(&PN);
1572 } else {
1573 for (const PHINode &PN : LLVMBB->phis())
1574 FuncInfo->InvalidatePHILiveOutRegInfo(PN: &PN);
1575 }
1576
1577 FuncInfo->VisitedBBs.insert(Ptr: LLVMBB);
1578 }
1579
1580 BasicBlock::const_iterator const Begin =
1581 LLVMBB->getFirstNonPHI()->getIterator();
1582 BasicBlock::const_iterator const End = LLVMBB->end();
1583 BasicBlock::const_iterator BI = End;
1584
1585 FuncInfo->MBB = FuncInfo->MBBMap[LLVMBB];
1586 if (!FuncInfo->MBB)
1587 continue; // Some blocks like catchpads have no code or MBB.
1588
1589 // Insert new instructions after any phi or argument setup code.
1590 FuncInfo->InsertPt = FuncInfo->MBB->end();
1591
1592    // Set up an EH landing-pad block.
1593 FuncInfo->ExceptionPointerVirtReg = 0;
1594 FuncInfo->ExceptionSelectorVirtReg = 0;
1595 if (LLVMBB->isEHPad())
1596 if (!PrepareEHLandingPad())
1597 continue;
1598
1599 // Before doing SelectionDAG ISel, see if FastISel has been requested.
1600 if (FastIS) {
1601 if (LLVMBB != &Fn.getEntryBlock())
1602 FastIS->startNewBlock();
1603
1604 unsigned NumFastIselRemaining = std::distance(first: Begin, last: End);
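// This count feeds the FastISel success/failure statistics as we walk the
// block bottom-up.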
1605
1606 // Pre-assign swifterror vregs.
1607 SwiftError->preassignVRegs(MBB: FuncInfo->MBB, Begin, End);
1608
1609 // Do FastISel on as many instructions as possible.
1610 for (; BI != Begin; --BI) {
1611 const Instruction *Inst = &*std::prev(x: BI);
1612
1613 // If we no longer require this instruction, skip it.
1614 if (isFoldedOrDeadInstruction(I: Inst, FuncInfo: *FuncInfo) ||
1615 ElidedArgCopyInstrs.count(Ptr: Inst)) {
1616 --NumFastIselRemaining;
1617 FastIS->handleDbgInfo(II: Inst);
1618 continue;
1619 }
1620
1621 // Bottom-up: reset the insert pos at the top, after any local-value
1622 // instructions.
1623 FastIS->recomputeInsertPt();
1624
1625 // Try to select the instruction with FastISel.
1626 if (FastIS->selectInstruction(I: Inst)) {
1627 --NumFastIselRemaining;
1628 ++NumFastIselSuccess;
1629
1630 FastIS->handleDbgInfo(II: Inst);
1631 // If fast isel succeeded, skip over all the folded instructions, and
1632 // then see if there is a load right before the selected instructions.
1633 // Try to fold the load if so.
1634 const Instruction *BeforeInst = Inst;
1635 while (BeforeInst != &*Begin) {
1636 BeforeInst = &*std::prev(x: BasicBlock::const_iterator(BeforeInst));
1637 if (!isFoldedOrDeadInstruction(I: BeforeInst, FuncInfo: *FuncInfo))
1638 break;
1639 }
1640 if (BeforeInst != Inst && isa<LoadInst>(Val: BeforeInst) &&
1641 BeforeInst->hasOneUse() &&
1642 FastIS->tryToFoldLoad(LI: cast<LoadInst>(Val: BeforeInst), FoldInst: Inst)) {
1643 // If we succeeded, don't re-select the load.
1644 LLVM_DEBUG(dbgs()
1645 << "FastISel folded load: " << *BeforeInst << "\n");
1646 FastIS->handleDbgInfo(II: BeforeInst);
1647 BI = std::next(x: BasicBlock::const_iterator(BeforeInst));
1648 --NumFastIselRemaining;
1649 ++NumFastIselSuccess;
1650 }
1651 continue;
1652 }
1653
1654 FastISelFailed = true;
1655
1656 // Then handle certain instructions as single-LLVM-Instruction blocks.
1657 // We cannot separate out GCrelocates to their own blocks since we need
1658 // to keep track of gc-relocates for a particular gc-statepoint. This is
1659 // done by SelectionDAGBuilder::LowerAsSTATEPOINT, called before
1660 // visitGCRelocate.
1661 if (isa<CallInst>(Val: Inst) && !isa<GCStatepointInst>(Val: Inst) &&
1662 !isa<GCRelocateInst>(Val: Inst) && !isa<GCResultInst>(Val: Inst)) {
1663 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1664 Inst->getDebugLoc(), LLVMBB);
1665
1666 R << "FastISel missed call";
1667
1668 if (R.isEnabled() || EnableFastISelAbort) {
1669 std::string InstStrStorage;
1670 raw_string_ostream InstStr(InstStrStorage);
1671 InstStr << *Inst;
1672
1673 R << ": " << InstStr.str();
1674 }
1675
1676 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort: EnableFastISelAbort > 2);
1677
1678 if (!Inst->getType()->isVoidTy() && !Inst->getType()->isTokenTy() &&
1679 !Inst->use_empty()) {
1680 Register &R = FuncInfo->ValueMap[Inst];
1681 if (!R)
1682 R = FuncInfo->CreateRegs(V: Inst);
1683 }
1684
1685 bool HadTailCall = false;
1686 MachineBasicBlock::iterator SavedInsertPt = FuncInfo->InsertPt;
1687 SelectBasicBlock(Begin: Inst->getIterator(), End: BI, HadTailCall);
1688
1689 // If the call was emitted as a tail call, we're done with the block.
1690 // We also need to delete any previously emitted instructions.
1691 if (HadTailCall) {
1692 FastIS->removeDeadCode(I: SavedInsertPt, E: FuncInfo->MBB->end());
1693 --BI;
1694 break;
1695 }
1696
1697 // Recompute NumFastIselRemaining as Selection DAG instruction
1698 // selection may have handled the call, input args, etc.
1699 unsigned RemainingNow = std::distance(first: Begin, last: BI);
1700 NumFastIselFailures += NumFastIselRemaining - RemainingNow;
1701 NumFastIselRemaining = RemainingNow;
1702 continue;
1703 }
1704
1705 OptimizationRemarkMissed R("sdagisel", "FastISelFailure",
1706 Inst->getDebugLoc(), LLVMBB);
1707
1708 bool ShouldAbort = EnableFastISelAbort;
1709 if (Inst->isTerminator()) {
1710 // Use a different message for terminator misses.
1711 R << "FastISel missed terminator";
1712 // Don't abort for terminator unless the level is really high
1713 ShouldAbort = (EnableFastISelAbort > 2);
1714 } else {
1715 R << "FastISel missed";
1716 }
1717
1718 if (R.isEnabled() || EnableFastISelAbort) {
1719 std::string InstStrStorage;
1720 raw_string_ostream InstStr(InstStrStorage);
1721 InstStr << *Inst;
1722 R << ": " << InstStr.str();
1723 }
1724
1725 reportFastISelFailure(MF&: *MF, ORE&: *ORE, R, ShouldAbort);
1726
1727 NumFastIselFailures += NumFastIselRemaining;
1728 break;
1729 }
1730
1731 FastIS->recomputeInsertPt();
1732 }
1733
1734 if (SP.shouldEmitSDCheck(BB: *LLVMBB)) {
1735 bool FunctionBasedInstrumentation =
1736 TLI->getSSPStackGuardCheck(M: *Fn.getParent());
1737 SDB->SPDescriptor.initialize(BB: LLVMBB, MBB: FuncInfo->MBBMap[LLVMBB],
1738 FunctionBasedInstrumentation);
1739 }
1740
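// If BI reached Begin, FastISel selected the whole block; otherwise the
// remaining range [Begin, BI) is handed to SelectionDAG below.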
1741 if (Begin != BI)
1742 ++NumDAGBlocks;
1743 else
1744 ++NumFastIselBlocks;
1745
1746 if (Begin != BI) {
1747 // Run SelectionDAG instruction selection on the remainder of the block
1748 // not handled by FastISel. If FastISel is not run, this is the entire
1749 // block.
1750 bool HadTailCall;
1751 SelectBasicBlock(Begin, End: BI, HadTailCall);
1752
1753 // But if FastISel was run, we already selected some of the block.
1754 // If we emitted a tail-call, we need to delete any previously emitted
1755 // instruction that follows it.
1756 if (FastIS && HadTailCall && FuncInfo->InsertPt != FuncInfo->MBB->end())
1757 FastIS->removeDeadCode(I: FuncInfo->InsertPt, E: FuncInfo->MBB->end());
1758 }
1759
1760 if (FastIS)
1761 FastIS->finishBasicBlock();
1762 FinishBasicBlock();
1763 FuncInfo->PHINodesToUpdate.clear();
1764 ElidedArgCopyInstrs.clear();
1765 }
1766
1767 // AsynchEH: Report Block State under -AsynchEH
1768 if (Fn.getParent()->getModuleFlag(Key: "eh-asynch"))
1769 reportIPToStateForBlocks(MF);
1770
1771 SP.copyToMachineFrameInfo(MFI&: MF->getFrameInfo());
1772
1773 SwiftError->propagateVRegs();
1774
1775 delete FastIS;
1776 SDB->clearDanglingDebugInfo();
1777 SDB->SPDescriptor.resetPerFunctionState();
1778}
1779
1780void
1781SelectionDAGISel::FinishBasicBlock() {
1782 LLVM_DEBUG(dbgs() << "Total amount of phi nodes to update: "
1783 << FuncInfo->PHINodesToUpdate.size() << "\n";
1784 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e;
1785 ++i) dbgs()
1786 << "Node " << i << " : (" << FuncInfo->PHINodesToUpdate[i].first
1787 << ", " << FuncInfo->PHINodesToUpdate[i].second << ")\n");
1788
1789  // Now that we know which MBB is the last one the LLVM BB expanded into,
1790  // update the PHI nodes in its successors.
1791 for (unsigned i = 0, e = FuncInfo->PHINodesToUpdate.size(); i != e; ++i) {
1792 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[i].first);
1793 assert(PHI->isPHI() &&
1794 "This is not a machine PHI node that we are updating!");
1795 if (!FuncInfo->MBB->isSuccessor(MBB: PHI->getParent()))
1796 continue;
1797 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[i].second).addMBB(MBB: FuncInfo->MBB);
1798 }
1799
1800 // Handle stack protector.
1801 if (SDB->SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
1802 // The target provides a guard check function. There is no need to
1803 // generate error handling code or to split current basic block.
1804 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1805
1806    // Add the load and the check to the basic block.
1807 FuncInfo->MBB = ParentMBB;
1808 FuncInfo->InsertPt =
1809 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1810 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1811 CurDAG->setRoot(SDB->getRoot());
1812 SDB->clear();
1813 CodeGenAndEmitDAG();
1814
1815 // Clear the Per-BB State.
1816 SDB->SPDescriptor.resetPerBBState();
1817 } else if (SDB->SPDescriptor.shouldEmitStackProtector()) {
1818 MachineBasicBlock *ParentMBB = SDB->SPDescriptor.getParentMBB();
1819 MachineBasicBlock *SuccessMBB = SDB->SPDescriptor.getSuccessMBB();
1820
1821 // Find the split point to split the parent mbb. At the same time copy all
1822 // physical registers used in the tail of parent mbb into virtual registers
1823 // before the split point and back into physical registers after the split
1824 // point. This prevents us needing to deal with Live-ins and many other
1825 // register allocation issues caused by us splitting the parent mbb. The
1826 // register allocator will clean up said virtual copies later on.
1827 MachineBasicBlock::iterator SplitPoint =
1828 findSplitPointForStackProtector(BB: ParentMBB, TII: *TII);
1829
1830 // Splice the terminator of ParentMBB into SuccessMBB.
1831 SuccessMBB->splice(Where: SuccessMBB->end(), Other: ParentMBB,
1832 From: SplitPoint,
1833 To: ParentMBB->end());
1834
1835    // Add the guard comparison and the conditional jump on mismatch to the
    // parent BB.
1836 FuncInfo->MBB = ParentMBB;
1837 FuncInfo->InsertPt = ParentMBB->end();
1838 SDB->visitSPDescriptorParent(SPD&: SDB->SPDescriptor, ParentBB: ParentMBB);
1839 CurDAG->setRoot(SDB->getRoot());
1840 SDB->clear();
1841 CodeGenAndEmitDAG();
1842
1843 // CodeGen Failure MBB if we have not codegened it yet.
1844 MachineBasicBlock *FailureMBB = SDB->SPDescriptor.getFailureMBB();
1845 if (FailureMBB->empty()) {
1846 FuncInfo->MBB = FailureMBB;
1847 FuncInfo->InsertPt = FailureMBB->end();
1848 SDB->visitSPDescriptorFailure(SPD&: SDB->SPDescriptor);
1849 CurDAG->setRoot(SDB->getRoot());
1850 SDB->clear();
1851 CodeGenAndEmitDAG();
1852 }
1853
1854 // Clear the Per-BB State.
1855 SDB->SPDescriptor.resetPerBBState();
1856 }
1857
1858 // Lower each BitTestBlock.
1859 for (auto &BTB : SDB->SL->BitTestCases) {
1860 // Lower header first, if it wasn't already lowered
1861 if (!BTB.Emitted) {
1862 // Set the current basic block to the mbb we wish to insert the code into
1863 FuncInfo->MBB = BTB.Parent;
1864 FuncInfo->InsertPt = FuncInfo->MBB->end();
1865 // Emit the code
1866 SDB->visitBitTestHeader(B&: BTB, SwitchBB: FuncInfo->MBB);
1867 CurDAG->setRoot(SDB->getRoot());
1868 SDB->clear();
1869 CodeGenAndEmitDAG();
1870 }
1871
1872 BranchProbability UnhandledProb = BTB.Prob;
1873 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
1874 UnhandledProb -= BTB.Cases[j].ExtraProb;
1875 // Set the current basic block to the mbb we wish to insert the code into
1876 FuncInfo->MBB = BTB.Cases[j].ThisBB;
1877 FuncInfo->InsertPt = FuncInfo->MBB->end();
1878 // Emit the code
1879
1880 // If all cases cover a contiguous range, it is not necessary to jump to
1881 // the default block after the last bit test fails. This is because the
1882 // range check during bit test header creation has guaranteed that every
1883 // case here doesn't go outside the range. In this case, there is no need
1884 // to perform the last bit test, as it will always be true. Instead, make
1885 // the second-to-last bit-test fall through to the target of the last bit
1886 // test, and delete the last bit test.
1887
1888 MachineBasicBlock *NextMBB;
1889 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1890 // Second-to-last bit-test with contiguous range or omitted range
1891 // check: fall through to the target of the final bit test.
1892 NextMBB = BTB.Cases[j + 1].TargetBB;
1893 } else if (j + 1 == ej) {
1894 // For the last bit test, fall through to Default.
1895 NextMBB = BTB.Default;
1896 } else {
1897 // Otherwise, fall through to the next bit test.
1898 NextMBB = BTB.Cases[j + 1].ThisBB;
1899 }
1900
1901 SDB->visitBitTestCase(BB&: BTB, NextMBB, BranchProbToNext: UnhandledProb, Reg: BTB.Reg, B&: BTB.Cases[j],
1902 SwitchBB: FuncInfo->MBB);
1903
1904 CurDAG->setRoot(SDB->getRoot());
1905 SDB->clear();
1906 CodeGenAndEmitDAG();
1907
1908 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
1909 // Since we're not going to use the final bit test, remove it.
1910 BTB.Cases.pop_back();
1911 break;
1912 }
1913 }
1914
1915 // Update PHI Nodes
1916 for (const std::pair<MachineInstr *, unsigned> &P :
1917 FuncInfo->PHINodesToUpdate) {
1918 MachineInstrBuilder PHI(*MF, P.first);
1919 MachineBasicBlock *PHIBB = PHI->getParent();
1920 assert(PHI->isPHI() &&
1921 "This is not a machine PHI node that we are updating!");
1922 // This is "default" BB. We have two jumps to it. From "header" BB and
1923 // from last "case" BB, unless the latter was skipped.
1924 if (PHIBB == BTB.Default) {
1925 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Parent);
1926 if (!BTB.ContiguousRange) {
1927 PHI.addReg(RegNo: P.second).addMBB(MBB: BTB.Cases.back().ThisBB);
1928 }
1929 }
1930 // One of "cases" BB.
1931 for (const SwitchCG::BitTestCase &BT : BTB.Cases) {
1932 MachineBasicBlock* cBB = BT.ThisBB;
1933 if (cBB->isSuccessor(MBB: PHIBB))
1934 PHI.addReg(RegNo: P.second).addMBB(MBB: cBB);
1935 }
1936 }
1937 }
1938 SDB->SL->BitTestCases.clear();
1939
1940 // If the JumpTable record is filled in, then we need to emit a jump table.
1941 // Updating the PHI nodes is tricky in this case, since we need to determine
1942 // whether the PHI is a successor of the range check MBB or the jump table MBB
1943 for (unsigned i = 0, e = SDB->SL->JTCases.size(); i != e; ++i) {
1944 // Lower header first, if it wasn't already lowered
1945 if (!SDB->SL->JTCases[i].first.Emitted) {
1946 // Set the current basic block to the mbb we wish to insert the code into
1947 FuncInfo->MBB = SDB->SL->JTCases[i].first.HeaderBB;
1948 FuncInfo->InsertPt = FuncInfo->MBB->end();
1949 // Emit the code
1950 SDB->visitJumpTableHeader(JT&: SDB->SL->JTCases[i].second,
1951 JTH&: SDB->SL->JTCases[i].first, SwitchBB: FuncInfo->MBB);
1952 CurDAG->setRoot(SDB->getRoot());
1953 SDB->clear();
1954 CodeGenAndEmitDAG();
1955 }
1956
1957 // Set the current basic block to the mbb we wish to insert the code into
1958 FuncInfo->MBB = SDB->SL->JTCases[i].second.MBB;
1959 FuncInfo->InsertPt = FuncInfo->MBB->end();
1960 // Emit the code
1961 SDB->visitJumpTable(JT&: SDB->SL->JTCases[i].second);
1962 CurDAG->setRoot(SDB->getRoot());
1963 SDB->clear();
1964 CodeGenAndEmitDAG();
1965
1966 // Update PHI Nodes
1967 for (unsigned pi = 0, pe = FuncInfo->PHINodesToUpdate.size();
1968 pi != pe; ++pi) {
1969 MachineInstrBuilder PHI(*MF, FuncInfo->PHINodesToUpdate[pi].first);
1970 MachineBasicBlock *PHIBB = PHI->getParent();
1971 assert(PHI->isPHI() &&
1972 "This is not a machine PHI node that we are updating!");
1973 // "default" BB. We can go there only from header BB.
1974 if (PHIBB == SDB->SL->JTCases[i].second.Default)
1975 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second)
1976 .addMBB(MBB: SDB->SL->JTCases[i].first.HeaderBB);
1977 // JT BB. Just iterate over successors here
1978 if (FuncInfo->MBB->isSuccessor(MBB: PHIBB))
1979 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pi].second).addMBB(MBB: FuncInfo->MBB);
1980 }
1981 }
1982 SDB->SL->JTCases.clear();
1983
1984 // If we generated any switch lowering information, build and codegen any
1985 // additional DAGs necessary.
1986 for (unsigned i = 0, e = SDB->SL->SwitchCases.size(); i != e; ++i) {
1987 // Set the current basic block to the mbb we wish to insert the code into
1988 FuncInfo->MBB = SDB->SL->SwitchCases[i].ThisBB;
1989 FuncInfo->InsertPt = FuncInfo->MBB->end();
1990
1991 // Determine the unique successors.
1992 SmallVector<MachineBasicBlock *, 2> Succs;
1993 Succs.push_back(Elt: SDB->SL->SwitchCases[i].TrueBB);
1994 if (SDB->SL->SwitchCases[i].TrueBB != SDB->SL->SwitchCases[i].FalseBB)
1995 Succs.push_back(Elt: SDB->SL->SwitchCases[i].FalseBB);
1996
1997 // Emit the code. Note that this could result in FuncInfo->MBB being split.
1998 SDB->visitSwitchCase(CB&: SDB->SL->SwitchCases[i], SwitchBB: FuncInfo->MBB);
1999 CurDAG->setRoot(SDB->getRoot());
2000 SDB->clear();
2001 CodeGenAndEmitDAG();
2002
2003 // Remember the last block, now that any splitting is done, for use in
2004 // populating PHI nodes in successors.
2005 MachineBasicBlock *ThisBB = FuncInfo->MBB;
2006
2007 // Handle any PHI nodes in successors of this chunk, as if we were coming
2008 // from the original BB before switch expansion. Note that PHI nodes can
2009 // occur multiple times in PHINodesToUpdate. We have to be very careful to
2010 // handle them the right number of times.
2011 for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
2012 FuncInfo->MBB = Succs[i];
2013 FuncInfo->InsertPt = FuncInfo->MBB->end();
2014 // FuncInfo->MBB may have been removed from the CFG if a branch was
2015 // constant folded.
2016 if (ThisBB->isSuccessor(MBB: FuncInfo->MBB)) {
2017 for (MachineBasicBlock::iterator
2018 MBBI = FuncInfo->MBB->begin(), MBBE = FuncInfo->MBB->end();
2019 MBBI != MBBE && MBBI->isPHI(); ++MBBI) {
2020 MachineInstrBuilder PHI(*MF, MBBI);
2021 // This value for this PHI node is recorded in PHINodesToUpdate.
2022 for (unsigned pn = 0; ; ++pn) {
2023 assert(pn != FuncInfo->PHINodesToUpdate.size() &&
2024 "Didn't find PHI entry!");
2025 if (FuncInfo->PHINodesToUpdate[pn].first == PHI) {
2026 PHI.addReg(RegNo: FuncInfo->PHINodesToUpdate[pn].second).addMBB(MBB: ThisBB);
2027 break;
2028 }
2029 }
2030 }
2031 }
2032 }
2033 }
2034 SDB->SL->SwitchCases.clear();
2035}
2036
2037/// Create the scheduler. If a specific scheduler was specified
2038/// via the SchedulerRegistry, use it, otherwise select the
2039/// one preferred by the target.
2040///
2041ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
2042 return ISHeuristic(this, OptLevel);
2043}
2044
2045//===----------------------------------------------------------------------===//
2046// Helper functions used by the generated instruction selector.
2047//===----------------------------------------------------------------------===//
2048// Calls to these methods are generated by tblgen.
2049
2050/// CheckAndMask - The isel is trying to match something like (and X, 255). If
2051/// the dag combiner simplified the 255, we still want to match. RHS is the
2052/// actual value in the DAG on the RHS of an AND, and DesiredMaskS is the value
2053/// specified in the .td file (e.g. 255).
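/// For example, a pattern written against (and X, 255) should still match
/// (and X, 15) when bits 4-7 of X are already known to be zero, because the
/// combiner may have shrunk the mask.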
2054bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
2055 int64_t DesiredMaskS) const {
2056 const APInt &ActualMask = RHS->getAPIntValue();
2057 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2058
2059 // If the actual mask exactly matches, success!
2060 if (ActualMask == DesiredMask)
2061 return true;
2062
2063  // If the actual AND mask allows bits that the desired mask does not, this
  // doesn't match.
2064 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2065 return false;
2066
2067 // Otherwise, the DAG Combiner may have proven that the value coming in is
2068 // either already zero or is not demanded. Check for known zero input bits.
2069 APInt NeededMask = DesiredMask & ~ActualMask;
2070 if (CurDAG->MaskedValueIsZero(Op: LHS, Mask: NeededMask))
2071 return true;
2072
2073 // TODO: check to see if missing bits are just not demanded.
2074
2075 // Otherwise, this pattern doesn't match.
2076 return false;
2077}
2078
2079/// CheckOrMask - The isel is trying to match something like (or X, 255). If
2080/// the dag combiner simplified the 255, we still want to match. RHS is the
2081/// actual value in the DAG on the RHS of an OR, and DesiredMaskS is the value
2082/// specified in the .td file (e.g. 255).
2083bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
2084 int64_t DesiredMaskS) const {
2085 const APInt &ActualMask = RHS->getAPIntValue();
2086 const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
2087
2088 // If the actual mask exactly matches, success!
2089 if (ActualMask == DesiredMask)
2090 return true;
2091
2092  // If the actual OR mask sets bits that the desired mask does not, this
  // doesn't match.
2093 if (!ActualMask.isSubsetOf(RHS: DesiredMask))
2094 return false;
2095
2096 // Otherwise, the DAG Combiner may have proven that the value coming in is
2097 // either already zero or is not demanded. Check for known zero input bits.
2098 APInt NeededMask = DesiredMask & ~ActualMask;
2099 KnownBits Known = CurDAG->computeKnownBits(Op: LHS);
2100
2101 // If all the missing bits in the or are already known to be set, match!
2102 if (NeededMask.isSubsetOf(RHS: Known.One))
2103 return true;
2104
2105 // TODO: check to see if missing bits are just not demanded.
2106
2107 // Otherwise, this pattern doesn't match.
2108 return false;
2109}
2110
2111/// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
2112/// by tblgen. Others should not call it.
2113void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops,
2114 const SDLoc &DL) {
2115 std::vector<SDValue> InOps;
2116 std::swap(x&: InOps, y&: Ops);
2117
2118 Ops.push_back(x: InOps[InlineAsm::Op_InputChain]); // 0
2119 Ops.push_back(x: InOps[InlineAsm::Op_AsmString]); // 1
2120 Ops.push_back(x: InOps[InlineAsm::Op_MDNode]); // 2, !srcloc
2121 Ops.push_back(x: InOps[InlineAsm::Op_ExtraInfo]); // 3 (SideEffect, AlignStack)
2122
2123 unsigned i = InlineAsm::Op_FirstOperand, e = InOps.size();
2124 if (InOps[e-1].getValueType() == MVT::Glue)
2125 --e; // Don't process a glue operand if it is here.
2126
2127 while (i != e) {
2128 InlineAsm::Flag Flags(InOps[i]->getAsZExtVal());
2129 if (!Flags.isMemKind() && !Flags.isFuncKind()) {
2130 // Just skip over this operand, copying the operands verbatim.
2131 Ops.insert(position: Ops.end(), first: InOps.begin() + i,
2132 last: InOps.begin() + i + Flags.getNumOperandRegisters() + 1);
2133 i += Flags.getNumOperandRegisters() + 1;
2134 } else {
2135 assert(Flags.getNumOperandRegisters() == 1 &&
2136 "Memory operand with multiple values?");
2137
2138 unsigned TiedToOperand;
2139 if (Flags.isUseOperandTiedToDef(Idx&: TiedToOperand)) {
2140 // We need the constraint ID from the operand this is tied to.
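// Walk forward from the first operand, skipping
// getNumOperandRegisters()+1 values per operand, until we reach the
// flag word of the tied-to operand.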
2141 unsigned CurOp = InlineAsm::Op_FirstOperand;
2142 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2143 for (; TiedToOperand; --TiedToOperand) {
2144 CurOp += Flags.getNumOperandRegisters() + 1;
2145 Flags = InlineAsm::Flag(InOps[CurOp]->getAsZExtVal());
2146 }
2147 }
2148
2149 // Otherwise, this is a memory operand. Ask the target to select it.
2150 std::vector<SDValue> SelOps;
2151 const InlineAsm::ConstraintCode ConstraintID =
2152 Flags.getMemoryConstraintID();
2153 if (SelectInlineAsmMemoryOperand(Op: InOps[i+1], ConstraintID, OutOps&: SelOps))
2154 report_fatal_error(reason: "Could not match memory address. Inline asm"
2155 " failure!");
2156
2157 // Add this to the output node.
2158 Flags = InlineAsm::Flag(Flags.isMemKind() ? InlineAsm::Kind::Mem
2159 : InlineAsm::Kind::Func,
2160 SelOps.size());
2161 Flags.setMemConstraint(ConstraintID);
2162 Ops.push_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32));
2163 llvm::append_range(C&: Ops, R&: SelOps);
2164 i += 2;
2165 }
2166 }
2167
2168 // Add the glue input back if present.
2169 if (e != InOps.size())
2170 Ops.push_back(x: InOps.back());
2171}
2172
2173/// findGlueUse - Return use of MVT::Glue value produced by the specified
2174/// SDNode.
2175///
2176static SDNode *findGlueUse(SDNode *N) {
2177 unsigned FlagResNo = N->getNumValues()-1;
2178 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
2179 SDUse &Use = I.getUse();
2180 if (Use.getResNo() == FlagResNo)
2181 return Use.getUser();
2182 }
2183 return nullptr;
2184}
2185
2186/// findNonImmUse - Return true if "Def" is a predecessor of "Root" via a path
2187/// beyond "ImmedUse". We may ignore chains as they are checked separately.
2188static bool findNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
2189 bool IgnoreChains) {
2190 SmallPtrSet<const SDNode *, 16> Visited;
2191 SmallVector<const SDNode *, 16> WorkList;
2192 // Only check if we have non-immediate uses of Def.
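// If ImmedUse is the only user of Def, every path that reaches Def goes
// through ImmedUse, so there is nothing more to check.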
2193 if (ImmedUse->isOnlyUserOf(N: Def))
2194 return false;
2195
2196 // We don't care about paths to Def that go through ImmedUse so mark it
2197 // visited and mark non-def operands as used.
2198 Visited.insert(Ptr: ImmedUse);
2199 for (const SDValue &Op : ImmedUse->op_values()) {
2200 SDNode *N = Op.getNode();
2201 // Ignore chain deps (they are validated by
2202 // HandleMergeInputChains) and immediate uses
2203 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2204 continue;
2205 if (!Visited.insert(Ptr: N).second)
2206 continue;
2207 WorkList.push_back(Elt: N);
2208 }
2209
2210 // Initialize worklist to operands of Root.
2211 if (Root != ImmedUse) {
2212 for (const SDValue &Op : Root->op_values()) {
2213 SDNode *N = Op.getNode();
2214 // Ignore chains (they are validated by HandleMergeInputChains)
2215 if ((Op.getValueType() == MVT::Other && IgnoreChains) || N == Def)
2216 continue;
2217 if (!Visited.insert(Ptr: N).second)
2218 continue;
2219 WorkList.push_back(Elt: N);
2220 }
2221 }
2222
2223 return SDNode::hasPredecessorHelper(N: Def, Visited, Worklist&: WorkList, MaxSteps: 0, TopologicalPrune: true);
2224}
2225
2226/// IsProfitableToFold - Returns true if it's profitable to fold the specific
2227/// operand node N of U during instruction selection that starts at Root.
2228bool SelectionDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
2229 SDNode *Root) const {
2230 if (OptLevel == CodeGenOptLevel::None)
2231 return false;
2232 return N.hasOneUse();
2233}
2234
2235/// IsLegalToFold - Returns true if the specific operand node N of
2236/// U can be folded during instruction selection that starts at Root.
2237bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
2238 CodeGenOptLevel OptLevel,
2239 bool IgnoreChains) {
2240 if (OptLevel == CodeGenOptLevel::None)
2241 return false;
2242
2243 // If Root use can somehow reach N through a path that doesn't contain
2244 // U then folding N would create a cycle. e.g. In the following
2245 // diagram, Root can reach N through X. If N is folded into Root, then
2246 // X is both a predecessor and a successor of U.
2247 //
2248 // [N*] //
2249 // ^ ^ //
2250 // / \ //
2251 // [U*] [X]? //
2252 // ^ ^ //
2253 // \ / //
2254 // \ / //
2255 // [Root*] //
2256 //
2257 // * indicates nodes to be folded together.
2258 //
2259 // If Root produces glue, then it gets (even more) interesting. Since it
2260 // will be "glued" together with its glue use in the scheduler, we need to
2261 // check if it might reach N.
2262 //
2263 // [N*] //
2264 // ^ ^ //
2265 // / \ //
2266 // [U*] [X]? //
2267 // ^ ^ //
2268 // \ \ //
2269 // \ | //
2270 // [Root*] | //
2271 // ^ | //
2272 // f | //
2273 // | / //
2274 // [Y] / //
2275 // ^ / //
2276 // f / //
2277 // | / //
2278 // [GU] //
2279 //
2280 // If GU (glue use) indirectly reaches N (the load), and Root folds N
2281 // (call it Fold), then X is a predecessor of GU and a successor of
2282 // Fold. But since Fold and GU are glued together, this will create
2283 // a cycle in the scheduling graph.
2284
2285 // If the node has glue, walk down the graph to the "lowest" node in the
2286  // glued set.
2287 EVT VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2288 while (VT == MVT::Glue) {
2289 SDNode *GU = findGlueUse(N: Root);
2290 if (!GU)
2291 break;
2292 Root = GU;
2293 VT = Root->getValueType(ResNo: Root->getNumValues()-1);
2294
2295 // If our query node has a glue result with a use, we've walked up it. If
2296 // the user (which has already been selected) has a chain or indirectly uses
2297 // the chain, HandleMergeInputChains will not consider it. Because of
2298 // this, we cannot ignore chains in this predicate.
2299 IgnoreChains = false;
2300 }
2301
2302 return !findNonImmUse(Root, Def: N.getNode(), ImmedUse: U, IgnoreChains);
2303}
2304
2305void SelectionDAGISel::Select_INLINEASM(SDNode *N) {
2306 SDLoc DL(N);
2307
2308 std::vector<SDValue> Ops(N->op_begin(), N->op_end());
2309 SelectInlineAsmMemoryOperands(Ops, DL);
2310
2311 const EVT VTs[] = {MVT::Other, MVT::Glue};
2312 SDValue New = CurDAG->getNode(N->getOpcode(), DL, VTs, Ops);
2313 New->setNodeId(-1);
2314 ReplaceUses(F: N, T: New.getNode());
2315 CurDAG->RemoveDeadNode(N);
2316}
2317
2318void SelectionDAGISel::Select_READ_REGISTER(SDNode *Op) {
2319 SDLoc dl(Op);
2320 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2321 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2322
2323 EVT VT = Op->getValueType(ResNo: 0);
2324 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2325 Register Reg =
2326 TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2327 MF: CurDAG->getMachineFunction());
2328 SDValue New = CurDAG->getCopyFromReg(
2329 Chain: Op->getOperand(Num: 0), dl, Reg, VT: Op->getValueType(ResNo: 0));
2330 New->setNodeId(-1);
2331 ReplaceUses(F: Op, T: New.getNode());
2332 CurDAG->RemoveDeadNode(N: Op);
2333}
2334
2335void SelectionDAGISel::Select_WRITE_REGISTER(SDNode *Op) {
2336 SDLoc dl(Op);
2337 MDNodeSDNode *MD = cast<MDNodeSDNode>(Val: Op->getOperand(Num: 1));
2338 const MDString *RegStr = cast<MDString>(Val: MD->getMD()->getOperand(I: 0));
2339
2340 EVT VT = Op->getOperand(Num: 2).getValueType();
2341 LLT Ty = VT.isSimple() ? getLLTForMVT(Ty: VT.getSimpleVT()) : LLT();
2342
2343 Register Reg = TLI->getRegisterByName(RegName: RegStr->getString().data(), Ty,
2344 MF: CurDAG->getMachineFunction());
2345 SDValue New = CurDAG->getCopyToReg(
2346 Chain: Op->getOperand(Num: 0), dl, Reg, N: Op->getOperand(Num: 2));
2347 New->setNodeId(-1);
2348 ReplaceUses(F: Op, T: New.getNode());
2349 CurDAG->RemoveDeadNode(N: Op);
2350}
2351
2352void SelectionDAGISel::Select_UNDEF(SDNode *N) {
2353 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::IMPLICIT_DEF, VT: N->getValueType(ResNo: 0));
2354}
2355
2356void SelectionDAGISel::Select_FREEZE(SDNode *N) {
2357 // TODO: We don't have FREEZE pseudo-instruction in MachineInstr-level now.
2358 // If FREEZE instruction is added later, the code below must be changed as
2359 // well.
2360 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::COPY, VT: N->getValueType(ResNo: 0),
2361 Op1: N->getOperand(Num: 0));
2362}
2363
2364void SelectionDAGISel::Select_ARITH_FENCE(SDNode *N) {
2365 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::ARITH_FENCE, VT: N->getValueType(ResNo: 0),
2366 Op1: N->getOperand(Num: 0));
2367}
2368
2369void SelectionDAGISel::Select_MEMBARRIER(SDNode *N) {
2370 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::MEMBARRIER, VT: N->getValueType(ResNo: 0),
2371 Op1: N->getOperand(Num: 0));
2372}
2373
2374void SelectionDAGISel::pushStackMapLiveVariable(SmallVectorImpl<SDValue> &Ops,
2375 SDValue OpVal, SDLoc DL) {
2376 SDNode *OpNode = OpVal.getNode();
2377
2378 // FrameIndex nodes should have been directly emitted to TargetFrameIndex
2379 // nodes at DAG-construction time.
2380 assert(OpNode->getOpcode() != ISD::FrameIndex);
2381
2382 if (OpNode->getOpcode() == ISD::Constant) {
2383 Ops.push_back(
2384 Elt: CurDAG->getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
2385 Ops.push_back(Elt: CurDAG->getTargetConstant(Val: OpNode->getAsZExtVal(), DL,
2386 VT: OpVal.getValueType()));
2387 } else {
2388 Ops.push_back(Elt: OpVal);
2389 }
2390}
2391
2392void SelectionDAGISel::Select_STACKMAP(SDNode *N) {
2393 SmallVector<SDValue, 32> Ops;
2394 auto *It = N->op_begin();
2395 SDLoc DL(N);
2396
2397 // Stash the chain and glue operands so we can move them to the end.
2398 SDValue Chain = *It++;
2399 SDValue InGlue = *It++;
2400
2401 // <id> operand.
2402 SDValue ID = *It++;
2403 assert(ID.getValueType() == MVT::i64);
2404 Ops.push_back(Elt: ID);
2405
2406 // <numShadowBytes> operand.
2407 SDValue Shad = *It++;
2408 assert(Shad.getValueType() == MVT::i32);
2409 Ops.push_back(Elt: Shad);
2410
2411 // Live variable operands.
2412 for (; It != N->op_end(); It++)
2413 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2414
2415 Ops.push_back(Elt: Chain);
2416 Ops.push_back(Elt: InGlue);
2417
2418 SDVTList NodeTys = CurDAG->getVTList(MVT::Other, MVT::Glue);
2419 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::STACKMAP, VTs: NodeTys, Ops);
2420}
2421
2422void SelectionDAGISel::Select_PATCHPOINT(SDNode *N) {
2423 SmallVector<SDValue, 32> Ops;
2424 auto *It = N->op_begin();
2425 SDLoc DL(N);
2426
2427 // Cache arguments that will be moved to the end in the target node.
2428 SDValue Chain = *It++;
2429 std::optional<SDValue> Glue;
2430 if (It->getValueType() == MVT::Glue)
2431 Glue = *It++;
2432 SDValue RegMask = *It++;
2433
2434 // <id> operand.
2435 SDValue ID = *It++;
2436 assert(ID.getValueType() == MVT::i64);
2437 Ops.push_back(Elt: ID);
2438
2439 // <numShadowBytes> operand.
2440 SDValue Shad = *It++;
2441 assert(Shad.getValueType() == MVT::i32);
2442 Ops.push_back(Elt: Shad);
2443
2444 // Add the callee.
2445 Ops.push_back(Elt: *It++);
2446
2447 // Add <numArgs>.
2448 SDValue NumArgs = *It++;
2449 assert(NumArgs.getValueType() == MVT::i32);
2450 Ops.push_back(Elt: NumArgs);
2451
2452 // Calling convention.
2453 Ops.push_back(Elt: *It++);
2454
2455 // Push the args for the call.
2456 for (uint64_t I = NumArgs->getAsZExtVal(); I != 0; I--)
2457 Ops.push_back(Elt: *It++);
2458
2459 // Now push the live variables.
2460 for (; It != N->op_end(); It++)
2461 pushStackMapLiveVariable(Ops, OpVal: *It, DL);
2462
2463 // Finally, the regmask, chain and (if present) glue are moved to the end.
2464 Ops.push_back(Elt: RegMask);
2465 Ops.push_back(Elt: Chain);
2466 if (Glue.has_value())
2467 Ops.push_back(Elt: *Glue);
2468
2469 SDVTList NodeTys = N->getVTList();
2470 CurDAG->SelectNodeTo(N, MachineOpc: TargetOpcode::PATCHPOINT, VTs: NodeTys, Ops);
2471}
2472
2473/// GetVBR - decode a vbr encoding whose top bit is set.
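/// Each matcher-table byte carries 7 payload bits (least-significant group
/// first); the high bit marks continuation. For example, the byte sequence
/// {0xAC, 0x02} decodes to 300: 0xAC contributes 44 and 0x02 contributes
/// 2 << 7 = 256.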
2474LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
2475GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
2476 assert(Val >= 128 && "Not a VBR");
2477 Val &= 127; // Remove first vbr bit.
2478
2479 unsigned Shift = 7;
2480 uint64_t NextBits;
2481 do {
2482 NextBits = MatcherTable[Idx++];
2483 Val |= (NextBits&127) << Shift;
2484 Shift += 7;
2485 } while (NextBits & 128);
2486
2487 return Val;
2488}
2489
2490void SelectionDAGISel::Select_JUMP_TABLE_DEBUG_INFO(SDNode *N) {
2491 SDLoc dl(N);
2492 CurDAG->SelectNodeTo(N, TargetOpcode::JUMP_TABLE_DEBUG_INFO, MVT::Glue,
2493 CurDAG->getTargetConstant(N->getConstantOperandVal(Num: 1),
2494 dl, MVT::i64, true));
2495}
2496
2497/// When a match is complete, this method updates uses of interior chain results
2498/// to use the new results.
2499void SelectionDAGISel::UpdateChains(
2500 SDNode *NodeToMatch, SDValue InputChain,
2501 SmallVectorImpl<SDNode *> &ChainNodesMatched, bool isMorphNodeTo) {
2502 SmallVector<SDNode*, 4> NowDeadNodes;
2503
2504 // Now that all the normal results are replaced, we replace the chain and
2505 // glue results if present.
2506 if (!ChainNodesMatched.empty()) {
2507 assert(InputChain.getNode() &&
2508 "Matched input chains but didn't produce a chain");
2509 // Loop over all of the nodes we matched that produced a chain result.
2510 // Replace all the chain results with the final chain we ended up with.
2511 for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
2512 SDNode *ChainNode = ChainNodesMatched[i];
2513 // If ChainNode is null, it's because we replaced it on a previous
2514 // iteration and we cleared it out of the map. Just skip it.
2515 if (!ChainNode)
2516 continue;
2517
2518 assert(ChainNode->getOpcode() != ISD::DELETED_NODE &&
2519 "Deleted node left in chain");
2520
2521 // Don't replace the results of the root node if we're doing a
2522 // MorphNodeTo.
2523 if (ChainNode == NodeToMatch && isMorphNodeTo)
2524 continue;
2525
2526 SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
2527 if (ChainVal.getValueType() == MVT::Glue)
2528 ChainVal = ChainVal.getValue(R: ChainVal->getNumValues()-2);
2529 assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
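// Replacing uses below may delete other nodes in ChainNodesMatched; the
// listener nulls out their entries so later iterations skip them.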
2530 SelectionDAG::DAGNodeDeletedListener NDL(
2531 *CurDAG, [&](SDNode *N, SDNode *E) {
2532 std::replace(first: ChainNodesMatched.begin(), last: ChainNodesMatched.end(), old_value: N,
2533 new_value: static_cast<SDNode *>(nullptr));
2534 });
2535 if (ChainNode->getOpcode() != ISD::TokenFactor)
2536 ReplaceUses(F: ChainVal, T: InputChain);
2537
2538 // If the node became dead and we haven't already seen it, delete it.
2539 if (ChainNode != NodeToMatch && ChainNode->use_empty() &&
2540 !llvm::is_contained(Range&: NowDeadNodes, Element: ChainNode))
2541 NowDeadNodes.push_back(Elt: ChainNode);
2542 }
2543 }
2544
2545 if (!NowDeadNodes.empty())
2546 CurDAG->RemoveDeadNodes(DeadNodes&: NowDeadNodes);
2547
2548 LLVM_DEBUG(dbgs() << "ISEL: Match complete!\n");
2549}
2550
2551/// HandleMergeInputChains - This implements the OPC_EmitMergeInputChains
2552/// operation for when the pattern matched at least one node with a chain. The
2553/// input vector contains a list of all of the chained nodes that we match. We
2554/// must determine if this is a valid thing to cover (i.e. matching it won't
2555/// induce cycles in the DAG) and, if so, creating a TokenFactor node that will
2556/// be used as the input node chain for the generated nodes.
2557static SDValue
2558HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
2559 SelectionDAG *CurDAG) {
2560
2561 SmallPtrSet<const SDNode *, 16> Visited;
2562 SmallVector<const SDNode *, 8> Worklist;
2563 SmallVector<SDValue, 3> InputChains;
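// Bound on how many nodes the predecessor search below may visit.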
2564 unsigned int Max = 8192;
2565
2566 // Quick exit on trivial merge.
2567 if (ChainNodesMatched.size() == 1)
2568 return ChainNodesMatched[0]->getOperand(Num: 0);
2569
2570 // Add chains that aren't already added (internal). Peek through
2571 // token factors.
2572 std::function<void(const SDValue)> AddChains = [&](const SDValue V) {
2573 if (V.getValueType() != MVT::Other)
2574 return;
2575 if (V->getOpcode() == ISD::EntryToken)
2576 return;
2577 if (!Visited.insert(Ptr: V.getNode()).second)
2578 return;
2579 if (V->getOpcode() == ISD::TokenFactor) {
2580 for (const SDValue &Op : V->op_values())
2581 AddChains(Op);
2582 } else
2583 InputChains.push_back(Elt: V);
2584 };
2585
2586 for (auto *N : ChainNodesMatched) {
2587 Worklist.push_back(Elt: N);
2588 Visited.insert(Ptr: N);
2589 }
2590
2591 while (!Worklist.empty())
2592 AddChains(Worklist.pop_back_val()->getOperand(Num: 0));
2593
2594 // Skip the search if there are no chain dependencies.
2595 if (InputChains.size() == 0)
2596 return CurDAG->getEntryNode();
2597
2598 // If one of these chains is a successor of input, we must have a
2599 // node that is both the predecessor and successor of the
2600 // to-be-merged nodes. Fail.
2601 Visited.clear();
2602 for (SDValue V : InputChains)
2603 Worklist.push_back(Elt: V.getNode());
2604
2605 for (auto *N : ChainNodesMatched)
2606 if (SDNode::hasPredecessorHelper(N, Visited, Worklist, MaxSteps: Max, TopologicalPrune: true))
2607 return SDValue();
2608
2609 // Return merged chain.
2610 if (InputChains.size() == 1)
2611 return InputChains[0];
2612 return CurDAG->getNode(ISD::TokenFactor, SDLoc(ChainNodesMatched[0]),
2613 MVT::Other, InputChains);
2614}
2615
2616/// MorphNode - Handle morphing a node in place for the selector.
2617SDNode *SelectionDAGISel::
2618MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
2619 ArrayRef<SDValue> Ops, unsigned EmitNodeInfo) {
2620 // It is possible we're using MorphNodeTo to replace a node with no
2621 // normal results with one that has a normal result (or we could be
2622 // adding a chain) and the input could have glue and chains as well.
2623 // In this case we need to shift the operands down.
2624 // FIXME: This is a horrible hack and broken in obscure cases, no worse
2625 // than the old isel though.
2626 int OldGlueResultNo = -1, OldChainResultNo = -1;
2627
2628 unsigned NTMNumResults = Node->getNumValues();
2629 if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Glue) {
2630 OldGlueResultNo = NTMNumResults-1;
2631 if (NTMNumResults != 1 &&
2632 Node->getValueType(ResNo: NTMNumResults-2) == MVT::Other)
2633 OldChainResultNo = NTMNumResults-2;
2634 } else if (Node->getValueType(ResNo: NTMNumResults-1) == MVT::Other)
2635 OldChainResultNo = NTMNumResults-1;
2636
2637 // Call the underlying SelectionDAG routine to do the transmogrification. Note
2638 // that this deletes operands of the old node that become dead.
2639 SDNode *Res = CurDAG->MorphNodeTo(N: Node, Opc: ~TargetOpc, VTs: VTList, Ops);
2640
2641 // MorphNodeTo can operate in two ways: if an existing node with the
2642 // specified operands exists, it can just return it. Otherwise, it
2643 // updates the node in place to have the requested operands.
2644 if (Res == Node) {
2645 // If we updated the node in place, reset the node ID. To the isel,
2646 // this should be just like a newly allocated machine node.
2647 Res->setNodeId(-1);
2648 }
2649
2650 unsigned ResNumResults = Res->getNumValues();
2651 // Move the glue if needed.
2652 if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
2653 static_cast<unsigned>(OldGlueResultNo) != ResNumResults - 1)
2654 ReplaceUses(F: SDValue(Node, OldGlueResultNo),
2655 T: SDValue(Res, ResNumResults - 1));
2656
2657 if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
2658 --ResNumResults;
2659
2660 // Move the chain reference if needed.
2661 if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
2662 static_cast<unsigned>(OldChainResultNo) != ResNumResults - 1)
2663 ReplaceUses(F: SDValue(Node, OldChainResultNo),
2664 T: SDValue(Res, ResNumResults - 1));
2665
2666  // If MorphNodeTo returned a pre-existing node rather than updating in place,
2667  // replace all uses of the old node with that existing node.
2668 if (Res != Node) {
2669 ReplaceNode(F: Node, T: Res);
2670 } else {
2671 EnforceNodeIdInvariant(Node: Res);
2672 }
2673
2674 return Res;
2675}
2676
2677/// CheckSame - Implements OP_CheckSame.
2678LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2679CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2680 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes) {
2681 // Accept if it is exactly the same as a previously recorded node.
2682 unsigned RecNo = MatcherTable[MatcherIndex++];
2683 assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
2684 return N == RecordedNodes[RecNo].first;
2685}
2686
2687/// CheckChildSame - Implements OP_CheckChildXSame.
2688LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckChildSame(
2689 const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2690 const SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes,
2691 unsigned ChildNo) {
2692 if (ChildNo >= N.getNumOperands())
2693 return false; // Match fails if out of range child #.
2694 return ::CheckSame(MatcherTable, MatcherIndex, N: N.getOperand(i: ChildNo),
2695 RecordedNodes);
2696}
2697
2698/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
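/// Predicate numbers 0-7 are folded into the opcode itself
/// (OPC_CheckPatternPredicate0-7); larger numbers follow as one extra byte,
/// and the TwoByte form stores a 16-bit number little-endian.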
2699LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2700CheckPatternPredicate(unsigned Opcode, const unsigned char *MatcherTable,
2701 unsigned &MatcherIndex, const SelectionDAGISel &SDISel) {
2702 bool TwoBytePredNo =
2703 Opcode == SelectionDAGISel::OPC_CheckPatternPredicateTwoByte;
2704 unsigned PredNo =
2705 TwoBytePredNo || Opcode == SelectionDAGISel::OPC_CheckPatternPredicate
2706 ? MatcherTable[MatcherIndex++]
2707 : Opcode - SelectionDAGISel::OPC_CheckPatternPredicate0;
2708 if (TwoBytePredNo)
2709 PredNo |= MatcherTable[MatcherIndex++] << 8;
2710 return SDISel.CheckPatternPredicate(PredNo);
2711}
2712
2713/// CheckNodePredicate - Implements OP_CheckNodePredicate.
2714LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2715CheckNodePredicate(unsigned Opcode, const unsigned char *MatcherTable,
2716 unsigned &MatcherIndex, const SelectionDAGISel &SDISel,
2717 SDNode *N) {
2718 unsigned PredNo = Opcode == SelectionDAGISel::OPC_CheckPredicate
2719 ? MatcherTable[MatcherIndex++]
2720 : Opcode - SelectionDAGISel::OPC_CheckPredicate0;
2721 return SDISel.CheckNodePredicate(N, PredNo);
2722}
2723
2724LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2725CheckOpcode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2726 SDNode *N) {
2727 uint16_t Opc = MatcherTable[MatcherIndex++];
2728 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
2729 return N->getOpcode() == Opc;
2730}
2731
2732LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckType(MVT::SimpleValueType VT,
2733 SDValue N,
2734 const TargetLowering *TLI,
2735 const DataLayout &DL) {
2736 if (N.getValueType() == VT)
2737 return true;
2738
2739 // Handle the case when VT is iPTR.
2740 return VT == MVT::iPTR && N.getValueType() == TLI->getPointerTy(DL);
2741}
2742
2743LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2744CheckChildType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI,
2745 const DataLayout &DL, unsigned ChildNo) {
2746 if (ChildNo >= N.getNumOperands())
2747 return false; // Match fails if out of range child #.
2748 return ::CheckType(VT, N: N.getOperand(i: ChildNo), TLI, DL);
2749}
2750
2751LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2752CheckCondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2753 SDValue N) {
2754 return cast<CondCodeSDNode>(Val&: N)->get() ==
2755 static_cast<ISD::CondCode>(MatcherTable[MatcherIndex++]);
2756}
2757
2758LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2759CheckChild2CondCode(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2760 SDValue N) {
2761 if (2 >= N.getNumOperands())
2762 return false;
2763 return ::CheckCondCode(MatcherTable, MatcherIndex, N: N.getOperand(i: 2));
2764}
2765
2766LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2767CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2768 SDValue N, const TargetLowering *TLI, const DataLayout &DL) {
2769 MVT::SimpleValueType VT =
2770 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
2771 if (cast<VTSDNode>(Val&: N)->getVT() == VT)
2772 return true;
2773
2774 // Handle the case when VT is iPTR.
2775 return VT == MVT::iPTR && cast<VTSDNode>(Val&: N)->getVT() == TLI->getPointerTy(DL);
2776}
2777
2778// Bit 0 stores the sign of the immediate. The upper bits contain the magnitude
2779// shifted left by 1.
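// For example, 6 decodes to 3, 7 decodes to -3, and the special value 1
// decodes to INT64_MIN.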
2780static uint64_t decodeSignRotatedValue(uint64_t V) {
2781 if ((V & 1) == 0)
2782 return V >> 1;
2783 if (V != 1)
2784 return -(V >> 1);
2785 // There is no such thing as -0 with integers. "-0" really means MININT.
2786 return 1ULL << 63;
2787}
2788
2789LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2790CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2791 SDValue N) {
2792 int64_t Val = MatcherTable[MatcherIndex++];
2793 if (Val & 128)
2794 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2795
2796 Val = decodeSignRotatedValue(V: Val);
2797
2798 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val&: N);
2799 return C && C->getAPIntValue().trySExtValue() == Val;
2800}
2801
2802LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2803CheckChildInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2804 SDValue N, unsigned ChildNo) {
2805 if (ChildNo >= N.getNumOperands())
2806 return false; // Match fails if out of range child #.
2807 return ::CheckInteger(MatcherTable, MatcherIndex, N: N.getOperand(i: ChildNo));
2808}
2809
2810LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2811CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
2812 SDValue N, const SelectionDAGISel &SDISel) {
2813 int64_t Val = MatcherTable[MatcherIndex++];
2814 if (Val & 128)
2815 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2816
2817 if (N->getOpcode() != ISD::AND) return false;
2818
2819 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
2820 return C && SDISel.CheckAndMask(LHS: N.getOperand(i: 0), RHS: C, DesiredMaskS: Val);
2821}
2822
2823LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
2824CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N,
2825 const SelectionDAGISel &SDISel) {
2826 int64_t Val = MatcherTable[MatcherIndex++];
2827 if (Val & 128)
2828 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
2829
2830 if (N->getOpcode() != ISD::OR) return false;
2831
2832 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
2833 return C && SDISel.CheckOrMask(LHS: N.getOperand(i: 0), RHS: C, DesiredMaskS: Val);
2834}
2835
2836/// IsPredicateKnownToFail - If we know how and can do so without pushing a
2837/// scope, evaluate the current node. If the current predicate is known to
2838/// fail, set Result=true and return anything. If the current predicate is
2839/// known to pass, set Result=false and return the MatcherIndex to continue
2840/// with. If the current predicate is unknown, set Result=false and return the
2841/// MatcherIndex to continue with.
2842static unsigned IsPredicateKnownToFail(const unsigned char *Table,
2843 unsigned Index, SDValue N,
2844 bool &Result,
2845 const SelectionDAGISel &SDISel,
2846 SmallVectorImpl<std::pair<SDValue, SDNode*>> &RecordedNodes) {
2847 unsigned Opcode = Table[Index++];
2848 switch (Opcode) {
2849 default:
2850 Result = false;
2851 return Index-1; // Could not evaluate this predicate.
2852 case SelectionDAGISel::OPC_CheckSame:
2853 Result = !::CheckSame(MatcherTable: Table, MatcherIndex&: Index, N, RecordedNodes);
2854 return Index;
2855 case SelectionDAGISel::OPC_CheckChild0Same:
2856 case SelectionDAGISel::OPC_CheckChild1Same:
2857 case SelectionDAGISel::OPC_CheckChild2Same:
2858 case SelectionDAGISel::OPC_CheckChild3Same:
2859 Result = !::CheckChildSame(MatcherTable: Table, MatcherIndex&: Index, N, RecordedNodes,
2860 ChildNo: Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Same);
2861 return Index;
2862 case SelectionDAGISel::OPC_CheckPatternPredicate:
2863 case SelectionDAGISel::OPC_CheckPatternPredicate0:
2864 case SelectionDAGISel::OPC_CheckPatternPredicate1:
2865 case SelectionDAGISel::OPC_CheckPatternPredicate2:
2866 case SelectionDAGISel::OPC_CheckPatternPredicate3:
2867 case SelectionDAGISel::OPC_CheckPatternPredicate4:
2868 case SelectionDAGISel::OPC_CheckPatternPredicate5:
2869 case SelectionDAGISel::OPC_CheckPatternPredicate6:
2870 case SelectionDAGISel::OPC_CheckPatternPredicate7:
2871 case SelectionDAGISel::OPC_CheckPatternPredicateTwoByte:
2872 Result = !::CheckPatternPredicate(Opcode, MatcherTable: Table, MatcherIndex&: Index, SDISel);
2873 return Index;
2874 case SelectionDAGISel::OPC_CheckPredicate:
2875 case SelectionDAGISel::OPC_CheckPredicate0:
2876 case SelectionDAGISel::OPC_CheckPredicate1:
2877 case SelectionDAGISel::OPC_CheckPredicate2:
2878 case SelectionDAGISel::OPC_CheckPredicate3:
2879 case SelectionDAGISel::OPC_CheckPredicate4:
2880 case SelectionDAGISel::OPC_CheckPredicate5:
2881 case SelectionDAGISel::OPC_CheckPredicate6:
2882 case SelectionDAGISel::OPC_CheckPredicate7:
2883 Result = !::CheckNodePredicate(Opcode, MatcherTable: Table, MatcherIndex&: Index, SDISel, N: N.getNode());
2884 return Index;
2885 case SelectionDAGISel::OPC_CheckOpcode:
2886 Result = !::CheckOpcode(MatcherTable: Table, MatcherIndex&: Index, N: N.getNode());
2887 return Index;
2888 case SelectionDAGISel::OPC_CheckType:
2889 case SelectionDAGISel::OPC_CheckTypeI32:
2890 case SelectionDAGISel::OPC_CheckTypeI64: {
2891 MVT::SimpleValueType VT;
2892 switch (Opcode) {
2893 case SelectionDAGISel::OPC_CheckTypeI32:
2894 VT = MVT::i32;
2895 break;
2896 case SelectionDAGISel::OPC_CheckTypeI64:
2897 VT = MVT::i64;
2898 break;
2899 default:
2900 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
2901 break;
2902 }
2903 Result = !::CheckType(VT, N, TLI: SDISel.TLI, DL: SDISel.CurDAG->getDataLayout());
2904 return Index;
2905 }
2906 case SelectionDAGISel::OPC_CheckTypeRes: {
2907 unsigned Res = Table[Index++];
2908 Result = !::CheckType(VT: static_cast<MVT::SimpleValueType>(Table[Index++]),
2909 N: N.getValue(R: Res), TLI: SDISel.TLI,
2910 DL: SDISel.CurDAG->getDataLayout());
2911 return Index;
2912 }
2913 case SelectionDAGISel::OPC_CheckChild0Type:
2914 case SelectionDAGISel::OPC_CheckChild1Type:
2915 case SelectionDAGISel::OPC_CheckChild2Type:
2916 case SelectionDAGISel::OPC_CheckChild3Type:
2917 case SelectionDAGISel::OPC_CheckChild4Type:
2918 case SelectionDAGISel::OPC_CheckChild5Type:
2919 case SelectionDAGISel::OPC_CheckChild6Type:
2920 case SelectionDAGISel::OPC_CheckChild7Type:
2921 case SelectionDAGISel::OPC_CheckChild0TypeI32:
2922 case SelectionDAGISel::OPC_CheckChild1TypeI32:
2923 case SelectionDAGISel::OPC_CheckChild2TypeI32:
2924 case SelectionDAGISel::OPC_CheckChild3TypeI32:
2925 case SelectionDAGISel::OPC_CheckChild4TypeI32:
2926 case SelectionDAGISel::OPC_CheckChild5TypeI32:
2927 case SelectionDAGISel::OPC_CheckChild6TypeI32:
2928 case SelectionDAGISel::OPC_CheckChild7TypeI32:
2929 case SelectionDAGISel::OPC_CheckChild0TypeI64:
2930 case SelectionDAGISel::OPC_CheckChild1TypeI64:
2931 case SelectionDAGISel::OPC_CheckChild2TypeI64:
2932 case SelectionDAGISel::OPC_CheckChild3TypeI64:
2933 case SelectionDAGISel::OPC_CheckChild4TypeI64:
2934 case SelectionDAGISel::OPC_CheckChild5TypeI64:
2935 case SelectionDAGISel::OPC_CheckChild6TypeI64:
2936 case SelectionDAGISel::OPC_CheckChild7TypeI64: {
2937 MVT::SimpleValueType VT;
2938 unsigned ChildNo;
2939 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
2940 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
2941 VT = MVT::i32;
2942 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
2943 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
2944 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
2945 VT = MVT::i64;
2946 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
2947 } else {
2948 VT = static_cast<MVT::SimpleValueType>(Table[Index++]);
2949 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
2950 }
2951 Result = !::CheckChildType(VT, N, TLI: SDISel.TLI,
2952 DL: SDISel.CurDAG->getDataLayout(), ChildNo);
2953 return Index;
2954 }
2955 case SelectionDAGISel::OPC_CheckCondCode:
2956 Result = !::CheckCondCode(MatcherTable: Table, MatcherIndex&: Index, N);
2957 return Index;
2958 case SelectionDAGISel::OPC_CheckChild2CondCode:
2959 Result = !::CheckChild2CondCode(MatcherTable: Table, MatcherIndex&: Index, N);
2960 return Index;
2961 case SelectionDAGISel::OPC_CheckValueType:
2962 Result = !::CheckValueType(MatcherTable: Table, MatcherIndex&: Index, N, TLI: SDISel.TLI,
2963 DL: SDISel.CurDAG->getDataLayout());
2964 return Index;
2965 case SelectionDAGISel::OPC_CheckInteger:
2966 Result = !::CheckInteger(MatcherTable: Table, MatcherIndex&: Index, N);
2967 return Index;
2968 case SelectionDAGISel::OPC_CheckChild0Integer:
2969 case SelectionDAGISel::OPC_CheckChild1Integer:
2970 case SelectionDAGISel::OPC_CheckChild2Integer:
2971 case SelectionDAGISel::OPC_CheckChild3Integer:
2972 case SelectionDAGISel::OPC_CheckChild4Integer:
2973 Result = !::CheckChildInteger(MatcherTable: Table, MatcherIndex&: Index, N,
2974 ChildNo: Table[Index-1] - SelectionDAGISel::OPC_CheckChild0Integer);
2975 return Index;
2976 case SelectionDAGISel::OPC_CheckAndImm:
2977 Result = !::CheckAndImm(MatcherTable: Table, MatcherIndex&: Index, N, SDISel);
2978 return Index;
2979 case SelectionDAGISel::OPC_CheckOrImm:
2980 Result = !::CheckOrImm(MatcherTable: Table, MatcherIndex&: Index, N, SDISel);
2981 return Index;
2982 }
2983}
2984
2985namespace {
2986
2987struct MatchScope {
2988 /// FailIndex - If this match fails, this is the index to continue with.
2989 unsigned FailIndex;
2990
2991 /// NodeStack - The node stack when the scope was formed.
2992 SmallVector<SDValue, 4> NodeStack;
2993
2994 /// NumRecordedNodes - The number of recorded nodes when the scope was formed.
2995 unsigned NumRecordedNodes;
2996
2997 /// NumMatchedMemRefs - The number of matched memref entries.
2998 unsigned NumMatchedMemRefs;
2999
3000 /// InputChain/InputGlue - The current chain/glue
3001 SDValue InputChain, InputGlue;
3002
3003 /// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
3004 bool HasChainNodesMatched;
3005};
3006
3007/// A DAG update listener to keep the matching state
3008/// (i.e. RecordedNodes and MatchScopes) up to date if the target is allowed
3009/// to change the DAG while matching. The X86 addressing-mode matcher is an
3010/// example of this.
3011class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
3012{
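  // NodeToMatch points at the caller's NodeToMatch variable so that, if CSE
  // replaces the root node during a complex-pattern call, the replacement is
  // visible to SelectCodeCommon as well.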
3013 SDNode **NodeToMatch;
3014 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
3015 SmallVectorImpl<MatchScope> &MatchScopes;
3016
3017public:
3018 MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
3019 SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
3020 SmallVectorImpl<MatchScope> &MS)
3021 : SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
3022 RecordedNodes(RN), MatchScopes(MS) {}
3023
3024 void NodeDeleted(SDNode *N, SDNode *E) override {
3025 // Some early-returns here to avoid the search if we deleted the node or
3026 // if the update comes from MorphNodeTo (MorphNodeTo is the last thing we
3027 // do, so it's unnecessary to update matching state at that point).
3028 // Neither of these can occur currently because we only install this
3029 // update listener while matching complex patterns.
3030 if (!E || E->isMachineOpcode())
3031 return;
3032 // Check if NodeToMatch was updated.
3033 if (N == *NodeToMatch)
3034 *NodeToMatch = E;
3035 // Performing a linear search here does not matter because we almost never
3036 // run this code; it would take a CSE hit during complex pattern
3037 // matching to get here.
3038 for (auto &I : RecordedNodes)
3039 if (I.first.getNode() == N)
3040 I.first.setNode(E);
3041
3042 for (auto &I : MatchScopes)
3043 for (auto &J : I.NodeStack)
3044 if (J.getNode() == N)
3045 J.setNode(E);
3046 }
3047};
3048
3049} // end anonymous namespace
3050
3051void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
3052 const unsigned char *MatcherTable,
3053 unsigned TableSize) {
3054 // FIXME: Should these even be selected? Handle these cases in the caller?
3055 switch (NodeToMatch->getOpcode()) {
3056 default:
3057 break;
3058 case ISD::EntryToken: // These nodes remain the same.
3059 case ISD::BasicBlock:
3060 case ISD::Register:
3061 case ISD::RegisterMask:
3062 case ISD::HANDLENODE:
3063 case ISD::MDNODE_SDNODE:
3064 case ISD::TargetConstant:
3065 case ISD::TargetConstantFP:
3066 case ISD::TargetConstantPool:
3067 case ISD::TargetFrameIndex:
3068 case ISD::TargetExternalSymbol:
3069 case ISD::MCSymbol:
3070 case ISD::TargetBlockAddress:
3071 case ISD::TargetJumpTable:
3072 case ISD::TargetGlobalTLSAddress:
3073 case ISD::TargetGlobalAddress:
3074 case ISD::TokenFactor:
3075 case ISD::CopyFromReg:
3076 case ISD::CopyToReg:
3077 case ISD::EH_LABEL:
3078 case ISD::ANNOTATION_LABEL:
3079 case ISD::LIFETIME_START:
3080 case ISD::LIFETIME_END:
3081 case ISD::PSEUDO_PROBE:
3082 NodeToMatch->setNodeId(-1); // Mark selected.
3083 return;
3084 case ISD::AssertSext:
3085 case ISD::AssertZext:
3086 case ISD::AssertAlign:
3087 ReplaceUses(F: SDValue(NodeToMatch, 0), T: NodeToMatch->getOperand(Num: 0));
3088 CurDAG->RemoveDeadNode(N: NodeToMatch);
3089 return;
3090 case ISD::INLINEASM:
3091 case ISD::INLINEASM_BR:
3092 Select_INLINEASM(N: NodeToMatch);
3093 return;
3094 case ISD::READ_REGISTER:
3095 Select_READ_REGISTER(Op: NodeToMatch);
3096 return;
3097 case ISD::WRITE_REGISTER:
3098 Select_WRITE_REGISTER(Op: NodeToMatch);
3099 return;
3100 case ISD::UNDEF:
3101 Select_UNDEF(N: NodeToMatch);
3102 return;
3103 case ISD::FREEZE:
3104 Select_FREEZE(N: NodeToMatch);
3105 return;
3106 case ISD::ARITH_FENCE:
3107 Select_ARITH_FENCE(N: NodeToMatch);
3108 return;
3109 case ISD::MEMBARRIER:
3110 Select_MEMBARRIER(N: NodeToMatch);
3111 return;
3112 case ISD::STACKMAP:
3113 Select_STACKMAP(N: NodeToMatch);
3114 return;
3115 case ISD::PATCHPOINT:
3116 Select_PATCHPOINT(N: NodeToMatch);
3117 return;
3118 case ISD::JUMP_TABLE_DEBUG_INFO:
3119 Select_JUMP_TABLE_DEBUG_INFO(N: NodeToMatch);
3120 return;
3121 }
3122
3123 assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
3124
3125 // Set up the node stack with NodeToMatch as the only node on the stack.
3126 SmallVector<SDValue, 8> NodeStack;
3127 SDValue N = SDValue(NodeToMatch, 0);
3128 NodeStack.push_back(Elt: N);
3129
3130 // MatchScopes - Scopes used when matching; if a match failure happens, this
3131 // indicates where to continue checking.
3132 SmallVector<MatchScope, 8> MatchScopes;
3133
3134 // RecordedNodes - This is the set of nodes that have been recorded by the
3135 // state machine. The second value is the parent of the node, or null if the
3136 // root is recorded.
3137 SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
3138
3139 // MatchedMemRefs - This is the set of MemRefs we've seen in the input
3140 // pattern.
3141 SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
3142
3143 // These are the current input chain and glue for use when generating nodes.
3144 // Various Emit operations change these. For example, emitting a CopyToReg
3145 // uses and updates these.
3146 SDValue InputChain, InputGlue;
3147
3148 // ChainNodesMatched - If a pattern matches nodes that have input/output
3149 // chains, the OPC_EmitMergeInputChains operation is emitted which indicates
3150 // which ones they are. The result is captured into this list so that we can
3151 // update the chain results when the pattern is complete.
3152 SmallVector<SDNode*, 3> ChainNodesMatched;
3153
3154 LLVM_DEBUG(dbgs() << "ISEL: Starting pattern match\n");
3155
3156 // Determine where to start the interpreter. Normally we start at opcode #0,
3157 // but if the state machine starts with an OPC_SwitchOpcode, then we
3158 // accelerate the first lookup (which is guaranteed to be hot) with the
3159 // OpcodeOffset table.
3160 unsigned MatcherIndex = 0;
3161
3162 if (!OpcodeOffset.empty()) {
3163 // Already computed the OpcodeOffset table, just index into it.
3164 if (N.getOpcode() < OpcodeOffset.size())
3165 MatcherIndex = OpcodeOffset[N.getOpcode()];
3166 LLVM_DEBUG(dbgs() << " Initial Opcode index to " << MatcherIndex << "\n");
3167
3168 } else if (MatcherTable[0] == OPC_SwitchOpcode) {
3169 // Otherwise, the table isn't computed, but the state machine does start
3170 // with an OPC_SwitchOpcode instruction. Populate the table now, since this
3171 // is the first time we're selecting an instruction.
3172 unsigned Idx = 1;
3173 while (true) {
3174 // Get the size of this case.
3175 unsigned CaseSize = MatcherTable[Idx++];
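      // Values >= 128 use a VBR encoding: bit 7 of a byte marks a
      // continuation, and GetVBR appends 7 more payload bits per extra byte
      // (least-significant group first).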
3176 if (CaseSize & 128)
3177 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx);
3178 if (CaseSize == 0) break;
3179
3180 // Get the opcode, add the index to the table.
3181 uint16_t Opc = MatcherTable[Idx++];
3182 Opc |= static_cast<uint16_t>(MatcherTable[Idx++]) << 8;
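        // Grow the table with some slack ((Opc + 1) * 2 entries) so that
        // later, larger opcodes rarely force another resize.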
3183 if (Opc >= OpcodeOffset.size())
3184 OpcodeOffset.resize(new_size: (Opc+1)*2);
3185 OpcodeOffset[Opc] = Idx;
3186 Idx += CaseSize;
3187 }
3188
3189 // Okay, do the lookup for the first opcode.
3190 if (N.getOpcode() < OpcodeOffset.size())
3191 MatcherIndex = OpcodeOffset[N.getOpcode()];
3192 }
3193
3194 while (true) {
3195 assert(MatcherIndex < TableSize && "Invalid index");
3196#ifndef NDEBUG
3197 unsigned CurrentOpcodeIndex = MatcherIndex;
3198#endif
3199 BuiltinOpcodes Opcode =
3200 static_cast<BuiltinOpcodes>(MatcherTable[MatcherIndex++]);
3201 switch (Opcode) {
3202 case OPC_Scope: {
3203 // Okay, the semantics of this operation are that we should push a scope
3204 // then evaluate the first child. However, pushing a scope only to have
3205 // the first check fail (which then pops it) is inefficient. If we can
3206 // determine immediately that the first check (or the first several) will
3207 // fail, don't even bother pushing a scope for them.
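      // Each alternative in the scope is prefixed by a VBR-encoded NumToSkip
      // giving the distance to the next alternative; a NumToSkip of zero
      // marks the end of the scope.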
3208 unsigned FailIndex;
3209
3210 while (true) {
3211 unsigned NumToSkip = MatcherTable[MatcherIndex++];
3212 if (NumToSkip & 128)
3213 NumToSkip = GetVBR(Val: NumToSkip, MatcherTable, Idx&: MatcherIndex);
3214 // Found the end of the scope with no match.
3215 if (NumToSkip == 0) {
3216 FailIndex = 0;
3217 break;
3218 }
3219
3220 FailIndex = MatcherIndex+NumToSkip;
3221
3222 unsigned MatcherIndexOfPredicate = MatcherIndex;
3223 (void)MatcherIndexOfPredicate; // silence warning.
3224
3225 // If we can't evaluate this predicate without pushing a scope (e.g. if
3226 // it is a 'MoveParent') or if the predicate succeeds on this node, we
3227 // push the scope and evaluate the full predicate chain.
3228 bool Result;
3229 MatcherIndex = IsPredicateKnownToFail(Table: MatcherTable, Index: MatcherIndex, N,
3230 Result, SDISel: *this, RecordedNodes);
3231 if (!Result)
3232 break;
3233
3234 LLVM_DEBUG(
3235 dbgs() << " Skipped scope entry (due to false predicate) at "
3236 << "index " << MatcherIndexOfPredicate << ", continuing at "
3237 << FailIndex << "\n");
3238 ++NumDAGIselRetries;
3239
3240 // Otherwise, we know that this case of the Scope is guaranteed to fail,
3241 // so move on to the next case.
3242 MatcherIndex = FailIndex;
3243 }
3244
3245 // If the whole scope failed to match, bail.
3246 if (FailIndex == 0) break;
3247
3248 // Push a MatchScope which indicates where to go if the first child fails
3249 // to match.
3250 MatchScope NewEntry;
3251 NewEntry.FailIndex = FailIndex;
3252 NewEntry.NodeStack.append(in_start: NodeStack.begin(), in_end: NodeStack.end());
3253 NewEntry.NumRecordedNodes = RecordedNodes.size();
3254 NewEntry.NumMatchedMemRefs = MatchedMemRefs.size();
3255 NewEntry.InputChain = InputChain;
3256 NewEntry.InputGlue = InputGlue;
3257 NewEntry.HasChainNodesMatched = !ChainNodesMatched.empty();
3258 MatchScopes.push_back(Elt: NewEntry);
3259 continue;
3260 }
3261 case OPC_RecordNode: {
3262 // Remember this node, it may end up being an operand in the pattern.
3263 SDNode *Parent = nullptr;
3264 if (NodeStack.size() > 1)
3265 Parent = NodeStack[NodeStack.size()-2].getNode();
3266 RecordedNodes.push_back(Elt: std::make_pair(x&: N, y&: Parent));
3267 continue;
3268 }
3269
3270 case OPC_RecordChild0: case OPC_RecordChild1:
3271 case OPC_RecordChild2: case OPC_RecordChild3:
3272 case OPC_RecordChild4: case OPC_RecordChild5:
3273 case OPC_RecordChild6: case OPC_RecordChild7: {
3274 unsigned ChildNo = Opcode-OPC_RecordChild0;
3275 if (ChildNo >= N.getNumOperands())
3276 break; // Match fails if out of range child #.
3277
3278 RecordedNodes.push_back(Elt: std::make_pair(x: N->getOperand(Num: ChildNo),
3279 y: N.getNode()));
3280 continue;
3281 }
3282 case OPC_RecordMemRef:
3283 if (auto *MN = dyn_cast<MemSDNode>(Val&: N))
3284 MatchedMemRefs.push_back(Elt: MN->getMemOperand());
3285 else {
3286 LLVM_DEBUG(dbgs() << "Expected MemSDNode "; N->dump(CurDAG);
3287 dbgs() << '\n');
3288 }
3289
3290 continue;
3291
3292 case OPC_CaptureGlueInput:
3293 // If the current node has an input glue, capture it in InputGlue.
3294 if (N->getNumOperands() != 0 &&
3295 N->getOperand(Num: N->getNumOperands()-1).getValueType() == MVT::Glue)
3296 InputGlue = N->getOperand(Num: N->getNumOperands()-1);
3297 continue;
3298
3299 case OPC_MoveChild: {
3300 unsigned ChildNo = MatcherTable[MatcherIndex++];
3301 if (ChildNo >= N.getNumOperands())
3302 break; // Match fails if out of range child #.
3303 N = N.getOperand(i: ChildNo);
3304 NodeStack.push_back(Elt: N);
3305 continue;
3306 }
3307
3308 case OPC_MoveChild0: case OPC_MoveChild1:
3309 case OPC_MoveChild2: case OPC_MoveChild3:
3310 case OPC_MoveChild4: case OPC_MoveChild5:
3311 case OPC_MoveChild6: case OPC_MoveChild7: {
3312 unsigned ChildNo = Opcode-OPC_MoveChild0;
3313 if (ChildNo >= N.getNumOperands())
3314 break; // Match fails if out of range child #.
3315 N = N.getOperand(i: ChildNo);
3316 NodeStack.push_back(Elt: N);
3317 continue;
3318 }
3319
3320 case OPC_MoveSibling:
3321 case OPC_MoveSibling0:
3322 case OPC_MoveSibling1:
3323 case OPC_MoveSibling2:
3324 case OPC_MoveSibling3:
3325 case OPC_MoveSibling4:
3326 case OPC_MoveSibling5:
3327 case OPC_MoveSibling6:
3328 case OPC_MoveSibling7: {
3329 // Pop the current node off the NodeStack.
3330 NodeStack.pop_back();
3331 assert(!NodeStack.empty() && "Node stack imbalance!");
3332 N = NodeStack.back();
3333
3334 unsigned SiblingNo = Opcode == OPC_MoveSibling
3335 ? MatcherTable[MatcherIndex++]
3336 : Opcode - OPC_MoveSibling0;
3337 if (SiblingNo >= N.getNumOperands())
3338 break; // Match fails if out of range sibling #.
3339 N = N.getOperand(i: SiblingNo);
3340 NodeStack.push_back(Elt: N);
3341 continue;
3342 }
3343 case OPC_MoveParent:
3344 // Pop the current node off the NodeStack.
3345 NodeStack.pop_back();
3346 assert(!NodeStack.empty() && "Node stack imbalance!");
3347 N = NodeStack.back();
3348 continue;
3349
3350 case OPC_CheckSame:
3351 if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
3352 continue;
3353
3354 case OPC_CheckChild0Same: case OPC_CheckChild1Same:
3355 case OPC_CheckChild2Same: case OPC_CheckChild3Same:
3356 if (!::CheckChildSame(MatcherTable, MatcherIndex, N, RecordedNodes,
3357 ChildNo: Opcode-OPC_CheckChild0Same))
3358 break;
3359 continue;
3360
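    // The numbered OPC_CheckPatternPredicate0-7 forms encode the predicate
    // number in the opcode itself; the plain form reads one byte from the
    // table and the TwoByte form reads two.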
3361 case OPC_CheckPatternPredicate:
3362 case OPC_CheckPatternPredicate0:
3363 case OPC_CheckPatternPredicate1:
3364 case OPC_CheckPatternPredicate2:
3365 case OPC_CheckPatternPredicate3:
3366 case OPC_CheckPatternPredicate4:
3367 case OPC_CheckPatternPredicate5:
3368 case OPC_CheckPatternPredicate6:
3369 case OPC_CheckPatternPredicate7:
3370 case OPC_CheckPatternPredicateTwoByte:
3371 if (!::CheckPatternPredicate(Opcode, MatcherTable, MatcherIndex, SDISel: *this))
3372 break;
3373 continue;
3374 case SelectionDAGISel::OPC_CheckPredicate0:
3375 case SelectionDAGISel::OPC_CheckPredicate1:
3376 case SelectionDAGISel::OPC_CheckPredicate2:
3377 case SelectionDAGISel::OPC_CheckPredicate3:
3378 case SelectionDAGISel::OPC_CheckPredicate4:
3379 case SelectionDAGISel::OPC_CheckPredicate5:
3380 case SelectionDAGISel::OPC_CheckPredicate6:
3381 case SelectionDAGISel::OPC_CheckPredicate7:
3382 case OPC_CheckPredicate:
3383 if (!::CheckNodePredicate(Opcode, MatcherTable, MatcherIndex, SDISel: *this,
3384 N: N.getNode()))
3385 break;
3386 continue;
3387 case OPC_CheckPredicateWithOperands: {
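      // Encoding: number of operands, then that many RecordedNodes indices,
      // then the node predicate number.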
3388 unsigned OpNum = MatcherTable[MatcherIndex++];
3389 SmallVector<SDValue, 8> Operands;
3390
3391 for (unsigned i = 0; i < OpNum; ++i)
3392 Operands.push_back(Elt: RecordedNodes[MatcherTable[MatcherIndex++]].first);
3393
3394 unsigned PredNo = MatcherTable[MatcherIndex++];
3395 if (!CheckNodePredicateWithOperands(N: N.getNode(), PredNo, Operands))
3396 break;
3397 continue;
3398 }
3399 case OPC_CheckComplexPat:
3400 case OPC_CheckComplexPat0:
3401 case OPC_CheckComplexPat1:
3402 case OPC_CheckComplexPat2:
3403 case OPC_CheckComplexPat3:
3404 case OPC_CheckComplexPat4:
3405 case OPC_CheckComplexPat5:
3406 case OPC_CheckComplexPat6:
3407 case OPC_CheckComplexPat7: {
3408 unsigned CPNum = Opcode == OPC_CheckComplexPat
3409 ? MatcherTable[MatcherIndex++]
3410 : Opcode - OPC_CheckComplexPat0;
3411 unsigned RecNo = MatcherTable[MatcherIndex++];
3412 assert(RecNo < RecordedNodes.size() && "Invalid CheckComplexPat");
3413
3414 // If target can modify DAG during matching, keep the matching state
3415 // consistent.
3416 std::unique_ptr<MatchStateUpdater> MSU;
3417 if (ComplexPatternFuncMutatesDAG())
3418 MSU.reset(p: new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
3419 MatchScopes));
3420
3421 if (!CheckComplexPattern(Root: NodeToMatch, Parent: RecordedNodes[RecNo].second,
3422 N: RecordedNodes[RecNo].first, PatternNo: CPNum,
3423 Result&: RecordedNodes))
3424 break;
3425 continue;
3426 }
3427 case OPC_CheckOpcode:
3428 if (!::CheckOpcode(MatcherTable, MatcherIndex, N: N.getNode())) break;
3429 continue;
3430
3431 case OPC_CheckType:
3432 case OPC_CheckTypeI32:
3433 case OPC_CheckTypeI64:
3434 MVT::SimpleValueType VT;
3435 switch (Opcode) {
3436 case OPC_CheckTypeI32:
3437 VT = MVT::i32;
3438 break;
3439 case OPC_CheckTypeI64:
3440 VT = MVT::i64;
3441 break;
3442 default:
3443 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3444 break;
3445 }
3446 if (!::CheckType(VT, N, TLI, DL: CurDAG->getDataLayout()))
3447 break;
3448 continue;
3449
3450 case OPC_CheckTypeRes: {
3451 unsigned Res = MatcherTable[MatcherIndex++];
3452 if (!::CheckType(
3453 VT: static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]),
3454 N: N.getValue(R: Res), TLI, DL: CurDAG->getDataLayout()))
3455 break;
3456 continue;
3457 }
3458
3459 case OPC_SwitchOpcode: {
3460 unsigned CurNodeOpcode = N.getOpcode();
3461 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3462 unsigned CaseSize;
3463 while (true) {
3464 // Get the size of this case.
3465 CaseSize = MatcherTable[MatcherIndex++];
3466 if (CaseSize & 128)
3467 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx&: MatcherIndex);
3468 if (CaseSize == 0) break;
3469
3470 uint16_t Opc = MatcherTable[MatcherIndex++];
3471 Opc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
3472
3473 // If the opcode matches, then we will execute this case.
3474 if (CurNodeOpcode == Opc)
3475 break;
3476
3477 // Otherwise, skip over this case.
3478 MatcherIndex += CaseSize;
3479 }
3480
3481 // If no cases matched, bail out.
3482 if (CaseSize == 0) break;
3483
3484 // Otherwise, execute the case we found.
3485 LLVM_DEBUG(dbgs() << " OpcodeSwitch from " << SwitchStart << " to "
3486 << MatcherIndex << "\n");
3487 continue;
3488 }
3489
3490 case OPC_SwitchType: {
3491 MVT CurNodeVT = N.getSimpleValueType();
3492 unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
3493 unsigned CaseSize;
3494 while (true) {
3495 // Get the size of this case.
3496 CaseSize = MatcherTable[MatcherIndex++];
3497 if (CaseSize & 128)
3498 CaseSize = GetVBR(Val: CaseSize, MatcherTable, Idx&: MatcherIndex);
3499 if (CaseSize == 0) break;
3500
3501 MVT CaseVT =
3502 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3503 if (CaseVT == MVT::iPTR)
3504 CaseVT = TLI->getPointerTy(DL: CurDAG->getDataLayout());
3505
3506 // If the VT matches, then we will execute this case.
3507 if (CurNodeVT == CaseVT)
3508 break;
3509
3510 // Otherwise, skip over this case.
3511 MatcherIndex += CaseSize;
3512 }
3513
3514 // If no cases matched, bail out.
3515 if (CaseSize == 0) break;
3516
3517 // Otherwise, execute the case we found.
3518 LLVM_DEBUG(dbgs() << " TypeSwitch[" << CurNodeVT
3519 << "] from " << SwitchStart << " to " << MatcherIndex
3520 << '\n');
3521 continue;
3522 }
3523 case OPC_CheckChild0Type:
3524 case OPC_CheckChild1Type:
3525 case OPC_CheckChild2Type:
3526 case OPC_CheckChild3Type:
3527 case OPC_CheckChild4Type:
3528 case OPC_CheckChild5Type:
3529 case OPC_CheckChild6Type:
3530 case OPC_CheckChild7Type:
3531 case OPC_CheckChild0TypeI32:
3532 case OPC_CheckChild1TypeI32:
3533 case OPC_CheckChild2TypeI32:
3534 case OPC_CheckChild3TypeI32:
3535 case OPC_CheckChild4TypeI32:
3536 case OPC_CheckChild5TypeI32:
3537 case OPC_CheckChild6TypeI32:
3538 case OPC_CheckChild7TypeI32:
3539 case OPC_CheckChild0TypeI64:
3540 case OPC_CheckChild1TypeI64:
3541 case OPC_CheckChild2TypeI64:
3542 case OPC_CheckChild3TypeI64:
3543 case OPC_CheckChild4TypeI64:
3544 case OPC_CheckChild5TypeI64:
3545 case OPC_CheckChild6TypeI64:
3546 case OPC_CheckChild7TypeI64: {
3547 MVT::SimpleValueType VT;
3548 unsigned ChildNo;
3549 if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI32 &&
3550 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI32) {
3551 VT = MVT::i32;
3552 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI32;
3553 } else if (Opcode >= SelectionDAGISel::OPC_CheckChild0TypeI64 &&
3554 Opcode <= SelectionDAGISel::OPC_CheckChild7TypeI64) {
3555 VT = MVT::i64;
3556 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0TypeI64;
3557 } else {
3558 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3559 ChildNo = Opcode - SelectionDAGISel::OPC_CheckChild0Type;
3560 }
3561 if (!::CheckChildType(VT, N, TLI, DL: CurDAG->getDataLayout(), ChildNo))
3562 break;
3563 continue;
3564 }
3565 case OPC_CheckCondCode:
3566 if (!::CheckCondCode(MatcherTable, MatcherIndex, N)) break;
3567 continue;
3568 case OPC_CheckChild2CondCode:
3569 if (!::CheckChild2CondCode(MatcherTable, MatcherIndex, N)) break;
3570 continue;
3571 case OPC_CheckValueType:
3572 if (!::CheckValueType(MatcherTable, MatcherIndex, N, TLI,
3573 DL: CurDAG->getDataLayout()))
3574 break;
3575 continue;
3576 case OPC_CheckInteger:
3577 if (!::CheckInteger(MatcherTable, MatcherIndex, N)) break;
3578 continue;
3579 case OPC_CheckChild0Integer: case OPC_CheckChild1Integer:
3580 case OPC_CheckChild2Integer: case OPC_CheckChild3Integer:
3581 case OPC_CheckChild4Integer:
3582 if (!::CheckChildInteger(MatcherTable, MatcherIndex, N,
3583 ChildNo: Opcode-OPC_CheckChild0Integer)) break;
3584 continue;
3585 case OPC_CheckAndImm:
3586 if (!::CheckAndImm(MatcherTable, MatcherIndex, N, SDISel: *this)) break;
3587 continue;
3588 case OPC_CheckOrImm:
3589 if (!::CheckOrImm(MatcherTable, MatcherIndex, N, SDISel: *this)) break;
3590 continue;
3591 case OPC_CheckImmAllOnesV:
3592 if (!ISD::isConstantSplatVectorAllOnes(N: N.getNode()))
3593 break;
3594 continue;
3595 case OPC_CheckImmAllZerosV:
3596 if (!ISD::isConstantSplatVectorAllZeros(N: N.getNode()))
3597 break;
3598 continue;
3599
3600 case OPC_CheckFoldableChainNode: {
3601 assert(NodeStack.size() != 1 && "No parent node");
3602 // Verify that all intermediate nodes between the root and this one have
3603 // a single use (ignoring chains, which are handled in UpdateChains).
3604 bool HasMultipleUses = false;
3605 for (unsigned i = 1, e = NodeStack.size()-1; i != e; ++i) {
3606 unsigned NNonChainUses = 0;
3607 SDNode *NS = NodeStack[i].getNode();
3608 for (auto UI = NS->use_begin(), UE = NS->use_end(); UI != UE; ++UI)
3609 if (UI.getUse().getValueType() != MVT::Other)
3610 if (++NNonChainUses > 1) {
3611 HasMultipleUses = true;
3612 break;
3613 }
3614 if (HasMultipleUses) break;
3615 }
3616 if (HasMultipleUses) break;
3617
3618 // Check to see that the target thinks this is profitable to fold and that
3619 // we can fold it without inducing cycles in the graph.
3620 if (!IsProfitableToFold(N, U: NodeStack[NodeStack.size()-2].getNode(),
3621 Root: NodeToMatch) ||
3622 !IsLegalToFold(N, U: NodeStack[NodeStack.size()-2].getNode(),
3623 Root: NodeToMatch, OptLevel,
3624 IgnoreChains: true/*We validate our own chains*/))
3625 break;
3626
3627 continue;
3628 }
3629 case OPC_EmitInteger:
3630 case OPC_EmitInteger8:
3631 case OPC_EmitInteger16:
3632 case OPC_EmitInteger32:
3633 case OPC_EmitInteger64:
3634 case OPC_EmitStringInteger:
3635 case OPC_EmitStringInteger32: {
3636 MVT::SimpleValueType VT;
3637 switch (Opcode) {
3638 case OPC_EmitInteger8:
3639 VT = MVT::i8;
3640 break;
3641 case OPC_EmitInteger16:
3642 VT = MVT::i16;
3643 break;
3644 case OPC_EmitInteger32:
3645 case OPC_EmitStringInteger32:
3646 VT = MVT::i32;
3647 break;
3648 case OPC_EmitInteger64:
3649 VT = MVT::i64;
3650 break;
3651 default:
3652 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3653 break;
3654 }
3655 int64_t Val = MatcherTable[MatcherIndex++];
3656 if (Val & 128)
3657 Val = GetVBR(Val, MatcherTable, Idx&: MatcherIndex);
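      // OPC_EmitInteger values are stored sign-rotated (sign bit kept in
      // bit 0) so small negative constants still get a compact VBR encoding;
      // decodeSignRotatedValue undoes that. OPC_EmitStringInteger values are
      // plain unsigned enum values and are not decoded.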
3658 if (Opcode >= OPC_EmitInteger && Opcode <= OPC_EmitInteger64)
3659 Val = decodeSignRotatedValue(V: Val);
3660 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode *>(
3661 CurDAG->getTargetConstant(Val, DL: SDLoc(NodeToMatch), VT), nullptr));
3662 continue;
3663 }
3664 case OPC_EmitRegister:
3665 case OPC_EmitRegisterI32:
3666 case OPC_EmitRegisterI64: {
3667 MVT::SimpleValueType VT;
3668 switch (Opcode) {
3669 case OPC_EmitRegisterI32:
3670 VT = MVT::i32;
3671 break;
3672 case OPC_EmitRegisterI64:
3673 VT = MVT::i64;
3674 break;
3675 default:
3676 VT = static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3677 break;
3678 }
3679 unsigned RegNo = MatcherTable[MatcherIndex++];
3680 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode *>(
3681 CurDAG->getRegister(Reg: RegNo, VT), nullptr));
3682 continue;
3683 }
3684 case OPC_EmitRegister2: {
3685 // For targets w/ more than 256 register names, the register enum
3686 // values are stored in two bytes in the matcher table (just like
3687 // opcodes).
3688 MVT::SimpleValueType VT =
3689 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3690 unsigned RegNo = MatcherTable[MatcherIndex++];
3691 RegNo |= MatcherTable[MatcherIndex++] << 8;
3692 RecordedNodes.push_back(Elt: std::pair<SDValue, SDNode*>(
3693 CurDAG->getRegister(Reg: RegNo, VT), nullptr));
3694 continue;
3695 }
3696
3697 case OPC_EmitConvertToTarget:
3698 case OPC_EmitConvertToTarget0:
3699 case OPC_EmitConvertToTarget1:
3700 case OPC_EmitConvertToTarget2:
3701 case OPC_EmitConvertToTarget3:
3702 case OPC_EmitConvertToTarget4:
3703 case OPC_EmitConvertToTarget5:
3704 case OPC_EmitConvertToTarget6:
3705 case OPC_EmitConvertToTarget7: {
3706 // Convert from IMM/FPIMM to target version.
3707 unsigned RecNo = Opcode == OPC_EmitConvertToTarget
3708 ? MatcherTable[MatcherIndex++]
3709 : Opcode - OPC_EmitConvertToTarget0;
3710 assert(RecNo < RecordedNodes.size() && "Invalid EmitConvertToTarget");
3711 SDValue Imm = RecordedNodes[RecNo].first;
3712
3713 if (Imm->getOpcode() == ISD::Constant) {
3714 const ConstantInt *Val=cast<ConstantSDNode>(Val&: Imm)->getConstantIntValue();
3715 Imm = CurDAG->getTargetConstant(Val: *Val, DL: SDLoc(NodeToMatch),
3716 VT: Imm.getValueType());
3717 } else if (Imm->getOpcode() == ISD::ConstantFP) {
3718 const ConstantFP *Val=cast<ConstantFPSDNode>(Val&: Imm)->getConstantFPValue();
3719 Imm = CurDAG->getTargetConstantFP(Val: *Val, DL: SDLoc(NodeToMatch),
3720 VT: Imm.getValueType());
3721 }
3722
3723 RecordedNodes.push_back(Elt: std::make_pair(x&: Imm, y&: RecordedNodes[RecNo].second));
3724 continue;
3725 }
3726
3727 case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
3728 case OPC_EmitMergeInputChains1_1: // OPC_EmitMergeInputChains, 1, 1
3729 case OPC_EmitMergeInputChains1_2: { // OPC_EmitMergeInputChains, 1, 2
3730 // These are space-optimized forms of OPC_EmitMergeInputChains.
3731 assert(!InputChain.getNode() &&
3732 "EmitMergeInputChains should be the first chain producing node");
3733 assert(ChainNodesMatched.empty() &&
3734 "Should only have one EmitMergeInputChains per match");
3735
3736 // Read all of the chained nodes.
3737 unsigned RecNo = Opcode - OPC_EmitMergeInputChains1_0;
3738 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3739 ChainNodesMatched.push_back(Elt: RecordedNodes[RecNo].first.getNode());
3740
3741 // If the chained node is not the root, we can't fold it if it has
3742 // multiple uses.
3743 // FIXME: What if other value results of the node have uses not matched
3744 // by this pattern?
3745 if (ChainNodesMatched.back() != NodeToMatch &&
3746 !RecordedNodes[RecNo].first.hasOneUse()) {
3747 ChainNodesMatched.clear();
3748 break;
3749 }
3750
3751 // Merge the input chains if they are not intra-pattern references.
3752 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3753
3754 if (!InputChain.getNode())
3755 break; // Failed to merge.
3756 continue;
3757 }
3758
3759 case OPC_EmitMergeInputChains: {
3760 assert(!InputChain.getNode() &&
3761 "EmitMergeInputChains should be the first chain producing node");
3762 // This node gets a list of nodes we matched in the input that have
3763 // chains. We want to token factor all of the input chains to these nodes
3764 // together. However, if any of the input chains is actually one of the
3765 // nodes matched in this pattern, then we have an intra-match reference.
3766 // Ignore these because the newly token factored chain should not refer to
3767 // the old nodes.
3768 unsigned NumChains = MatcherTable[MatcherIndex++];
3769 assert(NumChains != 0 && "Can't TF zero chains");
3770
3771 assert(ChainNodesMatched.empty() &&
3772 "Should only have one EmitMergeInputChains per match");
3773
3774 // Read all of the chained nodes.
3775 for (unsigned i = 0; i != NumChains; ++i) {
3776 unsigned RecNo = MatcherTable[MatcherIndex++];
3777 assert(RecNo < RecordedNodes.size() && "Invalid EmitMergeInputChains");
3778 ChainNodesMatched.push_back(Elt: RecordedNodes[RecNo].first.getNode());
3779
3780 // If the chained node is not the root, we can't fold it if it has
3781 // multiple uses.
3782 // FIXME: What if other value results of the node have uses not matched
3783 // by this pattern?
3784 if (ChainNodesMatched.back() != NodeToMatch &&
3785 !RecordedNodes[RecNo].first.hasOneUse()) {
3786 ChainNodesMatched.clear();
3787 break;
3788 }
3789 }
3790
3791 // If the inner loop broke out, the match fails.
3792 if (ChainNodesMatched.empty())
3793 break;
3794
3795 // Merge the input chains if they are not intra-pattern references.
3796 InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
3797
3798 if (!InputChain.getNode())
3799 break; // Failed to merge.
3800
3801 continue;
3802 }
3803
3804 case OPC_EmitCopyToReg:
3805 case OPC_EmitCopyToReg0:
3806 case OPC_EmitCopyToReg1:
3807 case OPC_EmitCopyToReg2:
3808 case OPC_EmitCopyToReg3:
3809 case OPC_EmitCopyToReg4:
3810 case OPC_EmitCopyToReg5:
3811 case OPC_EmitCopyToReg6:
3812 case OPC_EmitCopyToReg7:
3813 case OPC_EmitCopyToRegTwoByte: {
3814 unsigned RecNo =
3815 Opcode >= OPC_EmitCopyToReg0 && Opcode <= OPC_EmitCopyToReg7
3816 ? Opcode - OPC_EmitCopyToReg0
3817 : MatcherTable[MatcherIndex++];
3818 assert(RecNo < RecordedNodes.size() && "Invalid EmitCopyToReg");
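      // OPC_EmitCopyToRegTwoByte stores the destination physical register in
      // two bytes for targets with more than 256 register names.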
3819 unsigned DestPhysReg = MatcherTable[MatcherIndex++];
3820 if (Opcode == OPC_EmitCopyToRegTwoByte)
3821 DestPhysReg |= MatcherTable[MatcherIndex++] << 8;
3822
3823 if (!InputChain.getNode())
3824 InputChain = CurDAG->getEntryNode();
3825
3826 InputChain = CurDAG->getCopyToReg(Chain: InputChain, dl: SDLoc(NodeToMatch),
3827 Reg: DestPhysReg, N: RecordedNodes[RecNo].first,
3828 Glue: InputGlue);
3829
3830 InputGlue = InputChain.getValue(R: 1);
3831 continue;
3832 }
3833
3834 case OPC_EmitNodeXForm: {
3835 unsigned XFormNo = MatcherTable[MatcherIndex++];
3836 unsigned RecNo = MatcherTable[MatcherIndex++];
3837 assert(RecNo < RecordedNodes.size() && "Invalid EmitNodeXForm");
3838 SDValue Res = RunSDNodeXForm(V: RecordedNodes[RecNo].first, XFormNo);
3839 RecordedNodes.push_back(Elt: std::pair<SDValue,SDNode*>(Res, nullptr));
3840 continue;
3841 }
3842 case OPC_Coverage: {
3843 // This is emitted right before MorphNode/EmitNode.
3844 // So it should be safe to assume that this node has been selected.
3845 unsigned index = MatcherTable[MatcherIndex++];
3846 index |= (MatcherTable[MatcherIndex++] << 8);
3847 dbgs() << "COVERED: " << getPatternForIndex(index) << "\n";
3848 dbgs() << "INCLUDED: " << getIncludePathForIndex(index) << "\n";
3849 continue;
3850 }
3851
3852 case OPC_EmitNode:
3853 case OPC_EmitNode0:
3854 case OPC_EmitNode1:
3855 case OPC_EmitNode2:
3856 case OPC_EmitNode0None:
3857 case OPC_EmitNode1None:
3858 case OPC_EmitNode2None:
3859 case OPC_EmitNode0Chain:
3860 case OPC_EmitNode1Chain:
3861 case OPC_EmitNode2Chain:
3862 case OPC_MorphNodeTo:
3863 case OPC_MorphNodeTo0:
3864 case OPC_MorphNodeTo1:
3865 case OPC_MorphNodeTo2:
3866 case OPC_MorphNodeTo0None:
3867 case OPC_MorphNodeTo1None:
3868 case OPC_MorphNodeTo2None:
3869 case OPC_MorphNodeTo0Chain:
3870 case OPC_MorphNodeTo1Chain:
3871 case OPC_MorphNodeTo2Chain:
3872 case OPC_MorphNodeTo0GlueInput:
3873 case OPC_MorphNodeTo1GlueInput:
3874 case OPC_MorphNodeTo2GlueInput:
3875 case OPC_MorphNodeTo0GlueOutput:
3876 case OPC_MorphNodeTo1GlueOutput:
3877 case OPC_MorphNodeTo2GlueOutput: {
3878 uint16_t TargetOpc = MatcherTable[MatcherIndex++];
3879 TargetOpc |= static_cast<uint16_t>(MatcherTable[MatcherIndex++]) << 8;
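      // The fully compressed forms (OPC_EmitNode0None..2Chain and
      // OPC_MorphNodeTo0None..2GlueOutput) encode both the result count and
      // the chain/glue flags in the opcode; OPC_EmitNode0-2 and
      // OPC_MorphNodeTo0-2 encode only the result count; the generic forms
      // read the flags (and the VT count) from the table.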
3880 unsigned EmitNodeInfo;
3881 if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2Chain) {
3882 if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
3883 EmitNodeInfo = OPFL_Chain;
3884 else
3885 EmitNodeInfo = OPFL_None;
3886 } else if (Opcode >= OPC_MorphNodeTo0None &&
3887 Opcode <= OPC_MorphNodeTo2GlueOutput) {
3888 if (Opcode >= OPC_MorphNodeTo0Chain && Opcode <= OPC_MorphNodeTo2Chain)
3889 EmitNodeInfo = OPFL_Chain;
3890 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
3891 Opcode <= OPC_MorphNodeTo2GlueInput)
3892 EmitNodeInfo = OPFL_GlueInput;
3893 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
3894 Opcode <= OPC_MorphNodeTo2GlueOutput)
3895 EmitNodeInfo = OPFL_GlueOutput;
3896 else
3897 EmitNodeInfo = OPFL_None;
3898 } else
3899 EmitNodeInfo = MatcherTable[MatcherIndex++];
3900 // Get the result VT list.
3901 unsigned NumVTs;
3902 // If this is one of the compressed forms, get the number of VTs based
3903 // on the Opcode. Otherwise read the next byte from the table.
3904 if (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2)
3905 NumVTs = Opcode - OPC_MorphNodeTo0;
3906 else if (Opcode >= OPC_MorphNodeTo0None && Opcode <= OPC_MorphNodeTo2None)
3907 NumVTs = Opcode - OPC_MorphNodeTo0None;
3908 else if (Opcode >= OPC_MorphNodeTo0Chain &&
3909 Opcode <= OPC_MorphNodeTo2Chain)
3910 NumVTs = Opcode - OPC_MorphNodeTo0Chain;
3911 else if (Opcode >= OPC_MorphNodeTo0GlueInput &&
3912 Opcode <= OPC_MorphNodeTo2GlueInput)
3913 NumVTs = Opcode - OPC_MorphNodeTo0GlueInput;
3914 else if (Opcode >= OPC_MorphNodeTo0GlueOutput &&
3915 Opcode <= OPC_MorphNodeTo2GlueOutput)
3916 NumVTs = Opcode - OPC_MorphNodeTo0GlueOutput;
3917 else if (Opcode >= OPC_EmitNode0 && Opcode <= OPC_EmitNode2)
3918 NumVTs = Opcode - OPC_EmitNode0;
3919 else if (Opcode >= OPC_EmitNode0None && Opcode <= OPC_EmitNode2None)
3920 NumVTs = Opcode - OPC_EmitNode0None;
3921 else if (Opcode >= OPC_EmitNode0Chain && Opcode <= OPC_EmitNode2Chain)
3922 NumVTs = Opcode - OPC_EmitNode0Chain;
3923 else
3924 NumVTs = MatcherTable[MatcherIndex++];
3925 SmallVector<EVT, 4> VTs;
3926 for (unsigned i = 0; i != NumVTs; ++i) {
3927 MVT::SimpleValueType VT =
3928 static_cast<MVT::SimpleValueType>(MatcherTable[MatcherIndex++]);
3929 if (VT == MVT::iPTR)
3930 VT = TLI->getPointerTy(DL: CurDAG->getDataLayout()).SimpleTy;
3931 VTs.push_back(Elt: VT);
3932 }
3933
3934 if (EmitNodeInfo & OPFL_Chain)
3935 VTs.push_back(MVT::Other);
3936 if (EmitNodeInfo & OPFL_GlueOutput)
3937 VTs.push_back(MVT::Glue);
3938
3939 // This is hot code, so optimize the two most common cases of 1 and 2
3940 // results.
3941 SDVTList VTList;
3942 if (VTs.size() == 1)
3943 VTList = CurDAG->getVTList(VT: VTs[0]);
3944 else if (VTs.size() == 2)
3945 VTList = CurDAG->getVTList(VT1: VTs[0], VT2: VTs[1]);
3946 else
3947 VTList = CurDAG->getVTList(VTs);
3948
3949 // Get the operand list.
3950 unsigned NumOps = MatcherTable[MatcherIndex++];
3951 SmallVector<SDValue, 8> Ops;
3952 for (unsigned i = 0; i != NumOps; ++i) {
3953 unsigned RecNo = MatcherTable[MatcherIndex++];
3954 if (RecNo & 128)
3955 RecNo = GetVBR(Val: RecNo, MatcherTable, Idx&: MatcherIndex);
3956
3957 assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
3958 Ops.push_back(Elt: RecordedNodes[RecNo].first);
3959 }
3960
3961 // If there are variadic operands to add, handle them now.
3962 if (EmitNodeInfo & OPFL_VariadicInfo) {
3963 // Determine the start index to copy from.
3964 unsigned FirstOpToCopy = getNumFixedFromVariadicInfo(Flags: EmitNodeInfo);
3965 FirstOpToCopy += (EmitNodeInfo & OPFL_Chain) ? 1 : 0;
3966 assert(NodeToMatch->getNumOperands() >= FirstOpToCopy &&
3967 "Invalid variadic node");
3968 // Copy all of the variadic operands, not including a potential glue
3969 // input.
3970 for (unsigned i = FirstOpToCopy, e = NodeToMatch->getNumOperands();
3971 i != e; ++i) {
3972 SDValue V = NodeToMatch->getOperand(Num: i);
3973 if (V.getValueType() == MVT::Glue) break;
3974 Ops.push_back(Elt: V);
3975 }
3976 }
3977
3978 // If this has chain/glue inputs, add them.
3979 if (EmitNodeInfo & OPFL_Chain)
3980 Ops.push_back(Elt: InputChain);
3981 if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != nullptr)
3982 Ops.push_back(Elt: InputGlue);
3983
3984 // Check whether any matched node could raise an FP exception. Since all
3985 // such nodes must have a chain, it suffices to check ChainNodesMatched.
3986 // We need to perform this check before potentially modifying one of the
3987 // nodes via MorphNode.
3988 bool MayRaiseFPException =
3989 llvm::any_of(Range&: ChainNodesMatched, P: [this](SDNode *N) {
3990 return mayRaiseFPException(Node: N) && !N->getFlags().hasNoFPExcept();
3991 });
3992
3993 // Create the node.
3994 MachineSDNode *Res = nullptr;
3995 bool IsMorphNodeTo =
3996 Opcode == OPC_MorphNodeTo ||
3997 (Opcode >= OPC_MorphNodeTo0 && Opcode <= OPC_MorphNodeTo2GlueOutput);
3998 if (!IsMorphNodeTo) {
3999 // If this is a normal EmitNode command, just create the new node and
4000 // add the results to the RecordedNodes list.
4001 Res = CurDAG->getMachineNode(Opcode: TargetOpc, dl: SDLoc(NodeToMatch),
4002 VTs: VTList, Ops);
4003
4004 // Add all the non-glue/non-chain results to the RecordedNodes list.
4005 for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
4006 if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
4007 RecordedNodes.push_back(Elt: std::pair<SDValue,SDNode*>(SDValue(Res, i),
4008 nullptr));
4009 }
4010 } else {
4011 assert(NodeToMatch->getOpcode() != ISD::DELETED_NODE &&
4012 "NodeToMatch was removed partway through selection");
4013 SelectionDAG::DAGNodeDeletedListener NDL(*CurDAG, [&](SDNode *N,
4014 SDNode *E) {
4015 CurDAG->salvageDebugInfo(N&: *N);
4016 auto &Chain = ChainNodesMatched;
4017 assert((!E || !is_contained(Chain, N)) &&
4018 "Chain node replaced during MorphNode");
4019 llvm::erase(C&: Chain, V: N);
4020 });
4021 Res = cast<MachineSDNode>(Val: MorphNode(Node: NodeToMatch, TargetOpc, VTList,
4022 Ops, EmitNodeInfo));
4023 }
4024
4025 // Set the NoFPExcept flag when no original matched node could
4026 // raise an FP exception, but the new node potentially might.
4027 if (!MayRaiseFPException && mayRaiseFPException(Node: Res)) {
4028 SDNodeFlags Flags = Res->getFlags();
4029 Flags.setNoFPExcept(true);
4030 Res->setFlags(Flags);
4031 }
4032
4033 // If the node had chain/glue results, update our notion of the current
4034 // chain and glue.
4035 if (EmitNodeInfo & OPFL_GlueOutput) {
4036 InputGlue = SDValue(Res, VTs.size()-1);
4037 if (EmitNodeInfo & OPFL_Chain)
4038 InputChain = SDValue(Res, VTs.size()-2);
4039 } else if (EmitNodeInfo & OPFL_Chain)
4040 InputChain = SDValue(Res, VTs.size()-1);
4041
4042 // If the OPFL_MemRefs glue is set on this node, slap all of the
4043 // accumulated memrefs onto it.
4044 //
4045 // FIXME: This is vastly incorrect for patterns with multiple output
4046 // instructions that access memory and for ComplexPatterns that match
4047 // loads.
4048 if (EmitNodeInfo & OPFL_MemRefs) {
4049 // Only attach load or store memory operands if the generated
4050 // instruction may load or store.
4051 const MCInstrDesc &MCID = TII->get(Opcode: TargetOpc);
4052 bool mayLoad = MCID.mayLoad();
4053 bool mayStore = MCID.mayStore();
4054
4055 // We expect to have relatively few of these so just filter them into a
4056 // temporary buffer so that we can easily add them to the instruction.
4057 SmallVector<MachineMemOperand *, 4> FilteredMemRefs;
4058 for (MachineMemOperand *MMO : MatchedMemRefs) {
4059 if (MMO->isLoad()) {
4060 if (mayLoad)
4061 FilteredMemRefs.push_back(Elt: MMO);
4062 } else if (MMO->isStore()) {
4063 if (mayStore)
4064 FilteredMemRefs.push_back(Elt: MMO);
4065 } else {
4066 FilteredMemRefs.push_back(Elt: MMO);
4067 }
4068 }
4069
4070 CurDAG->setNodeMemRefs(N: Res, NewMemRefs: FilteredMemRefs);
4071 }
4072
4073 LLVM_DEBUG(if (!MatchedMemRefs.empty() && Res->memoperands_empty()) dbgs()
4074 << " Dropping mem operands\n";
4075 dbgs() << " " << (IsMorphNodeTo ? "Morphed" : "Created")
4076 << " node: ";
4077 Res->dump(CurDAG););
4078
4079 // If this was a MorphNodeTo then we're completely done!
4080 if (IsMorphNodeTo) {
4081 // Update chain uses.
4082 UpdateChains(NodeToMatch: Res, InputChain, ChainNodesMatched, isMorphNodeTo: true);
4083 return;
4084 }
4085 continue;
4086 }
4087
4088 case OPC_CompleteMatch: {
4089 // The match has been completed, and any new nodes (if any) have been
4090 // created. Patch up references to the matched dag to use the newly
4091 // created nodes.
4092 unsigned NumResults = MatcherTable[MatcherIndex++];
4093
4094 for (unsigned i = 0; i != NumResults; ++i) {
4095 unsigned ResSlot = MatcherTable[MatcherIndex++];
4096 if (ResSlot & 128)
4097 ResSlot = GetVBR(Val: ResSlot, MatcherTable, Idx&: MatcherIndex);
4098
4099 assert(ResSlot < RecordedNodes.size() && "Invalid CompleteMatch");
4100 SDValue Res = RecordedNodes[ResSlot].first;
4101
4102 assert(i < NodeToMatch->getNumValues() &&
4103 NodeToMatch->getValueType(i) != MVT::Other &&
4104 NodeToMatch->getValueType(i) != MVT::Glue &&
4105 "Invalid number of results to complete!");
4106 assert((NodeToMatch->getValueType(i) == Res.getValueType() ||
4107 NodeToMatch->getValueType(i) == MVT::iPTR ||
4108 Res.getValueType() == MVT::iPTR ||
4109 NodeToMatch->getValueType(i).getSizeInBits() ==
4110 Res.getValueSizeInBits()) &&
4111 "invalid replacement");
4112 ReplaceUses(F: SDValue(NodeToMatch, i), T: Res);
4113 }
4114
4115 // Update chain uses.
4116 UpdateChains(NodeToMatch, InputChain, ChainNodesMatched, isMorphNodeTo: false);
4117
4118 // If the root node defines glue, we need to update it to the glue result.
4119 // TODO: This never happens in our tests and I think it can be removed /
4120 // replaced with an assert, but doing it this way keeps the change
4121 // NFC.
4122 if (NodeToMatch->getValueType(NodeToMatch->getNumValues() - 1) ==
4123 MVT::Glue &&
4124 InputGlue.getNode())
4125 ReplaceUses(F: SDValue(NodeToMatch, NodeToMatch->getNumValues() - 1),
4126 T: InputGlue);
4127
4128 assert(NodeToMatch->use_empty() &&
4129 "Didn't replace all uses of the node?");
4130 CurDAG->RemoveDeadNode(N: NodeToMatch);
4131
4132 return;
4133 }
4134 }
4135
4136 // If the code reached this point, then the match failed. See if there is
4137 // another child to try in the current 'Scope'; otherwise pop it until we
4138 // find a case to check.
4139 LLVM_DEBUG(dbgs() << " Match failed at index " << CurrentOpcodeIndex
4140 << "\n");
4141 ++NumDAGIselRetries;
4142 while (true) {
4143 if (MatchScopes.empty()) {
4144 CannotYetSelect(N: NodeToMatch);
4145 return;
4146 }
4147
4148 // Restore the interpreter state back to the point where the scope was
4149 // formed.
4150 MatchScope &LastScope = MatchScopes.back();
4151 RecordedNodes.resize(N: LastScope.NumRecordedNodes);
4152 NodeStack.clear();
4153 NodeStack.append(in_start: LastScope.NodeStack.begin(), in_end: LastScope.NodeStack.end());
4154 N = NodeStack.back();
4155
4156 if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
4157 MatchedMemRefs.resize(N: LastScope.NumMatchedMemRefs);
4158 MatcherIndex = LastScope.FailIndex;
4159
4160 LLVM_DEBUG(dbgs() << " Continuing at " << MatcherIndex << "\n");
4161
4162 InputChain = LastScope.InputChain;
4163 InputGlue = LastScope.InputGlue;
4164 if (!LastScope.HasChainNodesMatched)
4165 ChainNodesMatched.clear();
4166
4167 // Check to see what the offset is at the new MatcherIndex. If it is zero
4168 // we have reached the end of this scope; otherwise we have another child
4169 // in the current scope to try.
4170 unsigned NumToSkip = MatcherTable[MatcherIndex++];
4171 if (NumToSkip & 128)
4172 NumToSkip = GetVBR(Val: NumToSkip, MatcherTable, Idx&: MatcherIndex);
4173
4174 // If we have another child in this scope to match, update FailIndex and
4175 // try it.
4176 if (NumToSkip != 0) {
4177 LastScope.FailIndex = MatcherIndex+NumToSkip;
4178 break;
4179 }
4180
4181 // End of this scope, pop it and try the next child in the containing
4182 // scope.
4183 MatchScopes.pop_back();
4184 }
4185 }
4186}
4187
4188/// Return whether the node may raise an FP exception.
4189bool SelectionDAGISel::mayRaiseFPException(SDNode *N) const {
4190 // For machine opcodes, consult the MCID flag.
4191 if (N->isMachineOpcode()) {
4192 const MCInstrDesc &MCID = TII->get(Opcode: N->getMachineOpcode());
4193 return MCID.mayRaiseFPException();
4194 }
4195
4196 // For ISD opcodes, only StrictFP opcodes may raise an FP
4197 // exception.
4198 if (N->isTargetOpcode())
4199 return N->isTargetStrictFPOpcode();
4200 return N->isStrictFPOpcode();
4201}
4202
4203bool SelectionDAGISel::isOrEquivalentToAdd(const SDNode *N) const {
4204 assert(N->getOpcode() == ISD::OR && "Unexpected opcode");
4205 auto *C = dyn_cast<ConstantSDNode>(Val: N->getOperand(Num: 1));
4206 if (!C)
4207 return false;
4208
4209 // Detect when "or" is used to add an offset to a stack object.
4210 if (auto *FN = dyn_cast<FrameIndexSDNode>(Val: N->getOperand(Num: 0))) {
4211 MachineFrameInfo &MFI = MF->getFrameInfo();
4212 Align A = MFI.getObjectAlign(ObjectIdx: FN->getIndex());
4213 int32_t Off = C->getSExtValue();
4214 // If the alleged offset fits in the zero bits guaranteed by
4215 // the alignment, then this or is really an add.
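  // For example, with a 16-byte-aligned stack object, OR-ing in an offset of
  // 0..15 only sets bits that are known to be zero in the base address, so
  // the OR behaves exactly like an ADD.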
4216 return (Off >= 0) && (((A.value() - 1) & Off) == unsigned(Off));
4217 }
4218 return false;
4219}
4220
4221void SelectionDAGISel::CannotYetSelect(SDNode *N) {
4222 std::string msg;
4223 raw_string_ostream Msg(msg);
4224 Msg << "Cannot select: ";
4225
4226 if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
4227 N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
4228 N->getOpcode() != ISD::INTRINSIC_VOID) {
4229 N->printrFull(O&: Msg, G: CurDAG);
4230 Msg << "\nIn function: " << MF->getName();
4231 } else {
4232 bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other;
4233 unsigned iid = N->getConstantOperandVal(Num: HasInputChain);
4234 if (iid < Intrinsic::num_intrinsics)
4235 Msg << "intrinsic %" << Intrinsic::getBaseName(id: (Intrinsic::ID)iid);
4236 else if (const TargetIntrinsicInfo *TII = TM.getIntrinsicInfo())
4237 Msg << "target intrinsic %" << TII->getName(IID: iid);
4238 else
4239 Msg << "unknown intrinsic #" << iid;
4240 }
4241 report_fatal_error(reason: Twine(Msg.str()));
4242}
4243
