1//===- llvm/CodeGen/ScheduleDAG.h - Common Base Class -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file Implements the ScheduleDAG class, which is used as the common base
10/// class for instruction schedulers. This encapsulates the scheduling DAG,
11/// which is shared between SelectionDAG and MachineInstr scheduling.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
16#define LLVM_CODEGEN_SCHEDULEDAG_H
17
18#include "llvm/ADT/BitVector.h"
19#include "llvm/ADT/PointerIntPair.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/iterator.h"
22#include "llvm/CodeGen/MachineInstr.h"
23#include "llvm/CodeGen/TargetLowering.h"
24#include "llvm/Support/ErrorHandling.h"
25#include <cassert>
26#include <cstddef>
27#include <iterator>
28#include <string>
29#include <vector>
30
31namespace llvm {
32
33template <class GraphType> struct GraphTraits;
34template<class Graph> class GraphWriter;
35class LLVMTargetMachine;
36class MachineFunction;
37class MachineRegisterInfo;
38class MCInstrDesc;
39struct MCSchedClassDesc;
40class SDNode;
41class SUnit;
42class ScheduleDAG;
43class TargetInstrInfo;
44class TargetRegisterClass;
45class TargetRegisterInfo;
46
  /// Scheduling dependency. This represents one direction of an edge in the
  /// scheduling DAG.
  class SDep {
  public:
    /// These are the different kinds of scheduling dependencies.
    enum Kind {
      Data,        ///< Regular data dependence (aka true-dependence).
      Anti,        ///< A register anti-dependence (aka WAR).
      Output,      ///< A register output-dependence (aka WAW).
      Order        ///< Any other ordering dependency.
    };

    // Strong dependencies must be respected by the scheduler. Artificial
    // dependencies may be removed only if they are redundant with another
    // strong dependence.
    //
    // Weak dependencies may be violated by the scheduling strategy, but only if
    // the strategy can prove it is correct to do so.
    //
    // Strong OrderKinds must occur before "Weak".
    // Weak OrderKinds must occur after "Weak".
    enum OrderKind {
      Barrier,      ///< An unknown scheduling barrier.
      MayAliasMem,  ///< Nonvolatile load/Store instructions that may alias.
      MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
      Artificial,   ///< Arbitrary strong DAG edge (no real dependence).
      Weak,         ///< Arbitrary weak DAG edge.
      Cluster       ///< Weak DAG edge linking a chain of clustered instrs.
    };

  private:
    /// A pointer to the depending/depended-on SUnit, and an enum
    /// indicating the kind of the dependency.
    PointerIntPair<SUnit *, 2, Kind> Dep;

    /// A union discriminated by the dependence kind (Dep's int field).
    union {
      /// For Data, Anti, and Output dependencies, the associated register. For
      /// Data dependencies that don't currently have a register assigned, this
      /// is set to zero.
      unsigned Reg;

      /// Additional information about Order dependencies.
      unsigned OrdKind; // enum OrderKind
    } Contents;

    /// The time associated with this edge. Often this is just the value of the
    /// Latency field of the predecessor, however advanced models may provide
    /// additional information about specific edges.
    unsigned Latency = 0u;

  public:
    /// Constructs a null SDep. This is only for use by container classes which
    /// require default constructors. SUnits may not have null SDep edges.
    SDep() : Dep(nullptr, Data) {}

    /// Constructs a register-kind SDep (Data, Anti, or Output) with the
    /// specified values. Anti and Output edges get latency 0; Data edges
    /// default to latency 1.
    SDep(SUnit *S, Kind kind, unsigned Reg)
      : Dep(S, kind), Contents() {
      switch (kind) {
      default:
        llvm_unreachable("Reg given for non-register dependence!");
      case Anti:
      case Output:
        assert(Reg != 0 &&
               "SDep::Anti and SDep::Output must use a non-zero Reg!");
        Contents.Reg = Reg;
        Latency = 0;
        break;
      case Data:
        Contents.Reg = Reg;
        Latency = 1;
        break;
      }
    }

    /// Constructs an Order-kind SDep with the given OrderKind. Order edges
    /// always start with zero latency.
    SDep(SUnit *S, OrderKind kind)
      : Dep(S, Order), Contents(), Latency(0) {
      Contents.OrdKind = kind;
    }

    /// Returns true if the specified SDep is equivalent except for latency.
    bool overlaps(const SDep &Other) const;

    /// Full equality: same target, kind, payload, and latency.
    bool operator==(const SDep &Other) const {
      return overlaps(Other) && Latency == Other.Latency;
    }

    bool operator!=(const SDep &Other) const {
      return !operator==(Other);
    }

    /// Returns the latency value for this edge, which roughly means the
    /// minimum number of cycles that must elapse between the predecessor and
    /// the successor, given that they have this edge between them.
    unsigned getLatency() const {
      return Latency;
    }

    /// Sets the latency for this edge.
    void setLatency(unsigned Lat) {
      Latency = Lat;
    }

    /// Returns the SUnit to which this edge points.
    SUnit *getSUnit() const;

    /// Assigns the SUnit to which this edge points.
    void setSUnit(SUnit *SU);

    /// Returns an enum value representing the kind of the dependence.
    Kind getKind() const;

    /// Shorthand for getKind() != SDep::Data.
    bool isCtrl() const {
      return getKind() != Data;
    }

    /// Tests if this is an Order dependence between two memory accesses
    /// where both sides of the dependence access memory in non-volatile and
    /// fully modeled ways.
    bool isNormalMemory() const {
      return getKind() == Order && (Contents.OrdKind == MayAliasMem
                                    || Contents.OrdKind == MustAliasMem);
    }

    /// Tests if this is an Order dependence that is marked as a barrier.
    bool isBarrier() const {
      return getKind() == Order && Contents.OrdKind == Barrier;
    }

    /// Tests if this could be any kind of memory dependence.
    bool isNormalMemoryOrBarrier() const {
      return (isNormalMemory() || isBarrier());
    }

    /// Tests if this is an Order dependence that is marked as
    /// "must alias", meaning that the SUnits at either end of the edge have a
    /// memory dependence on a known memory location.
    bool isMustAlias() const {
      return getKind() == Order && Contents.OrdKind == MustAliasMem;
    }

    /// Tests if this a weak dependence. Weak dependencies are considered DAG
    /// edges for height computation and other heuristics, but do not force
    /// ordering. Breaking a weak edge may require the scheduler to compensate,
    /// for example by inserting a copy.
    bool isWeak() const {
      // Relies on OrderKind ordering: all weak kinds are >= Weak.
      return getKind() == Order && Contents.OrdKind >= Weak;
    }

    /// Tests if this is an Order dependence that is marked as
    /// "artificial", meaning it isn't necessary for correctness.
    bool isArtificial() const {
      return getKind() == Order && Contents.OrdKind == Artificial;
    }

    /// Tests if this is an Order dependence that is marked as "cluster",
    /// meaning it is artificial and wants to be adjacent.
    bool isCluster() const {
      return getKind() == Order && Contents.OrdKind == Cluster;
    }

    /// Tests if this is a Data dependence that is associated with a register.
    bool isAssignedRegDep() const {
      return getKind() == Data && Contents.Reg != 0;
    }

    /// Returns the register associated with this edge. This is only valid on
    /// Data, Anti, and Output edges. On Data edges, this value may be zero,
    /// meaning there is no associated register.
    unsigned getReg() const {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "getReg called on non-register dependence edge!");
      return Contents.Reg;
    }

    /// Assigns the associated register for this edge. This is only valid on
    /// Data, Anti, and Output edges. On Anti and Output edges, this value must
    /// not be zero. On Data edges, the value may be zero, which would mean that
    /// no specific register is associated with this edge.
    void setReg(unsigned Reg) {
      assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
             "setReg called on non-register dependence edge!");
      assert((getKind() != Anti || Reg != 0) &&
             "SDep::Anti edge cannot use the zero register!");
      assert((getKind() != Output || Reg != 0) &&
             "SDep::Output edge cannot use the zero register!");
      Contents.Reg = Reg;
    }

    void dump(const TargetRegisterInfo *TRI = nullptr) const;
  };
240
241 /// Scheduling unit. This is a node in the scheduling DAG.
242 class SUnit {
243 private:
244 enum : unsigned { BoundaryID = ~0u };
245
246 SDNode *Node = nullptr; ///< Representative node.
247 MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
248
249 public:
250 SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
251 /// was cloned. (SD scheduling only)
252
253 const MCSchedClassDesc *SchedClass =
254 nullptr; ///< nullptr or resolved SchedClass.
255
256 SmallVector<SDep, 4> Preds; ///< All sunit predecessors.
257 SmallVector<SDep, 4> Succs; ///< All sunit successors.
258
259 typedef SmallVectorImpl<SDep>::iterator pred_iterator;
260 typedef SmallVectorImpl<SDep>::iterator succ_iterator;
261 typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
262 typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
263
264 unsigned NodeNum = BoundaryID; ///< Entry # of node in the node vector.
265 unsigned NodeQueueId = 0; ///< Queue id of node.
266 unsigned NumPreds = 0; ///< # of SDep::Data preds.
267 unsigned NumSuccs = 0; ///< # of SDep::Data sucss.
268 unsigned NumPredsLeft = 0; ///< # of preds not scheduled.
269 unsigned NumSuccsLeft = 0; ///< # of succs not scheduled.
270 unsigned WeakPredsLeft = 0; ///< # of weak preds not scheduled.
271 unsigned WeakSuccsLeft = 0; ///< # of weak succs not scheduled.
272 unsigned short NumRegDefsLeft = 0; ///< # of reg defs with no scheduled use.
273 unsigned short Latency = 0; ///< Node latency.
274 bool isVRegCycle : 1; ///< May use and def the same vreg.
275 bool isCall : 1; ///< Is a function call.
276 bool isCallOp : 1; ///< Is a function call operand.
277 bool isTwoAddress : 1; ///< Is a two-address instruction.
278 bool isCommutable : 1; ///< Is a commutable instruction.
279 bool hasPhysRegUses : 1; ///< Has physreg uses.
280 bool hasPhysRegDefs : 1; ///< Has physreg defs that are being used.
281 bool hasPhysRegClobbers : 1; ///< Has any physreg defs, used or not.
282 bool isPending : 1; ///< True once pending.
283 bool isAvailable : 1; ///< True once available.
284 bool isScheduled : 1; ///< True once scheduled.
285 bool isScheduleHigh : 1; ///< True if preferable to schedule high.
286 bool isScheduleLow : 1; ///< True if preferable to schedule low.
287 bool isCloned : 1; ///< True if this node has been cloned.
288 bool isUnbuffered : 1; ///< Uses an unbuffered resource.
289 bool hasReservedResource : 1; ///< Uses a reserved resource.
290 Sched::Preference SchedulingPref = Sched::None; ///< Scheduling preference.
291
292 private:
293 bool isDepthCurrent : 1; ///< True if Depth is current.
294 bool isHeightCurrent : 1; ///< True if Height is current.
295 unsigned Depth = 0; ///< Node depth.
296 unsigned Height = 0; ///< Node height.
297
298 public:
299 unsigned TopReadyCycle = 0; ///< Cycle relative to start when node is ready.
300 unsigned BotReadyCycle = 0; ///< Cycle relative to end when node is ready.
301
302 const TargetRegisterClass *CopyDstRC =
303 nullptr; ///< Is a special copy node if != nullptr.
304 const TargetRegisterClass *CopySrcRC = nullptr;
305
306 /// Constructs an SUnit for pre-regalloc scheduling to represent an
307 /// SDNode and any nodes flagged to it.
308 SUnit(SDNode *node, unsigned nodenum)
309 : Node(node), NodeNum(nodenum), isVRegCycle(false), isCall(false),
310 isCallOp(false), isTwoAddress(false), isCommutable(false),
311 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
312 isPending(false), isAvailable(false), isScheduled(false),
313 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
314 isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
315 isHeightCurrent(false) {}
316
317 /// Constructs an SUnit for post-regalloc scheduling to represent a
318 /// MachineInstr.
319 SUnit(MachineInstr *instr, unsigned nodenum)
320 : Instr(instr), NodeNum(nodenum), isVRegCycle(false), isCall(false),
321 isCallOp(false), isTwoAddress(false), isCommutable(false),
322 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
323 isPending(false), isAvailable(false), isScheduled(false),
324 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
325 isUnbuffered(false), hasReservedResource(false), isDepthCurrent(false),
326 isHeightCurrent(false) {}
327
328 /// Constructs a placeholder SUnit.
329 SUnit()
330 : isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false),
331 isCommutable(false), hasPhysRegUses(false), hasPhysRegDefs(false),
332 hasPhysRegClobbers(false), isPending(false), isAvailable(false),
333 isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
334 isCloned(false), isUnbuffered(false), hasReservedResource(false),
335 isDepthCurrent(false), isHeightCurrent(false) {}
336
337 /// Boundary nodes are placeholders for the boundary of the
338 /// scheduling region.
339 ///
340 /// BoundaryNodes can have DAG edges, including Data edges, but they do not
341 /// correspond to schedulable entities (e.g. instructions) and do not have a
342 /// valid ID. Consequently, always check for boundary nodes before accessing
343 /// an associative data structure keyed on node ID.
344 bool isBoundaryNode() const { return NodeNum == BoundaryID; }
345
346 /// Assigns the representative SDNode for this SUnit. This may be used
347 /// during pre-regalloc scheduling.
348 void setNode(SDNode *N) {
349 assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
350 Node = N;
351 }
352
353 /// Returns the representative SDNode for this SUnit. This may be used
354 /// during pre-regalloc scheduling.
355 SDNode *getNode() const {
356 assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
357 return Node;
358 }
359
360 /// Returns true if this SUnit refers to a machine instruction as
361 /// opposed to an SDNode.
362 bool isInstr() const { return Instr; }
363
364 /// Assigns the instruction for the SUnit. This may be used during
365 /// post-regalloc scheduling.
366 void setInstr(MachineInstr *MI) {
367 assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
368 Instr = MI;
369 }
370
371 /// Returns the representative MachineInstr for this SUnit. This may be used
372 /// during post-regalloc scheduling.
373 MachineInstr *getInstr() const {
374 assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
375 return Instr;
376 }
377
378 /// Adds the specified edge as a pred of the current node if not already.
379 /// It also adds the current node as a successor of the specified node.
380 bool addPred(const SDep &D, bool Required = true);
381
382 /// Adds a barrier edge to SU by calling addPred(), with latency 0
383 /// generally or latency 1 for a store followed by a load.
384 bool addPredBarrier(SUnit *SU) {
385 SDep Dep(SU, SDep::Barrier);
386 unsigned TrueMemOrderLatency =
387 ((SU->getInstr()->mayStore() && this->getInstr()->mayLoad()) ? 1 : 0);
388 Dep.setLatency(TrueMemOrderLatency);
389 return addPred(D: Dep);
390 }
391
392 /// Removes the specified edge as a pred of the current node if it exists.
393 /// It also removes the current node as a successor of the specified node.
394 void removePred(const SDep &D);
395
396 /// Returns the depth of this node, which is the length of the maximum path
397 /// up to any node which has no predecessors.
398 unsigned getDepth() const {
399 if (!isDepthCurrent)
400 const_cast<SUnit *>(this)->ComputeDepth();
401 return Depth;
402 }
403
404 /// Returns the height of this node, which is the length of the
405 /// maximum path down to any node which has no successors.
406 unsigned getHeight() const {
407 if (!isHeightCurrent)
408 const_cast<SUnit *>(this)->ComputeHeight();
409 return Height;
410 }
411
412 /// If NewDepth is greater than this node's depth value, sets it to
413 /// be the new depth value. This also recursively marks successor nodes
414 /// dirty.
415 void setDepthToAtLeast(unsigned NewDepth);
416
417 /// If NewHeight is greater than this node's height value, set it to be
418 /// the new height value. This also recursively marks predecessor nodes
419 /// dirty.
420 void setHeightToAtLeast(unsigned NewHeight);
421
422 /// Sets a flag in this node to indicate that its stored Depth value
423 /// will require recomputation the next time getDepth() is called.
424 void setDepthDirty();
425
426 /// Sets a flag in this node to indicate that its stored Height value
427 /// will require recomputation the next time getHeight() is called.
428 void setHeightDirty();
429
430 /// Tests if node N is a predecessor of this node.
431 bool isPred(const SUnit *N) const {
432 for (const SDep &Pred : Preds)
433 if (Pred.getSUnit() == N)
434 return true;
435 return false;
436 }
437
438 /// Tests if node N is a successor of this node.
439 bool isSucc(const SUnit *N) const {
440 for (const SDep &Succ : Succs)
441 if (Succ.getSUnit() == N)
442 return true;
443 return false;
444 }
445
446 bool isTopReady() const {
447 return NumPredsLeft == 0;
448 }
449 bool isBottomReady() const {
450 return NumSuccsLeft == 0;
451 }
452
453 /// Orders this node's predecessor edges such that the critical path
454 /// edge occurs first.
455 void biasCriticalPath();
456
457 void dumpAttributes() const;
458
459 private:
460 void ComputeDepth();
461 void ComputeHeight();
462 };
463
464 /// Returns true if the specified SDep is equivalent except for latency.
465 inline bool SDep::overlaps(const SDep &Other) const {
466 if (Dep != Other.Dep)
467 return false;
468 switch (Dep.getInt()) {
469 case Data:
470 case Anti:
471 case Output:
472 return Contents.Reg == Other.Contents.Reg;
473 case Order:
474 return Contents.OrdKind == Other.Contents.OrdKind;
475 }
476 llvm_unreachable("Invalid dependency kind!");
477 }
478
  /// Returns the SUnit to which this edge points.
  inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }

  /// Assigns the SUnit to which this edge points.
  inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }

  /// Returns an enum value representing the kind of the dependence.
  inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
487
488 //===--------------------------------------------------------------------===//
489
490 /// This interface is used to plug different priorities computation
491 /// algorithms into the list scheduler. It implements the interface of a
492 /// standard priority queue, where nodes are inserted in arbitrary order and
493 /// returned in priority order. The computation of the priority and the
494 /// representation of the queue are totally up to the implementation to
495 /// decide.
496 class SchedulingPriorityQueue {
497 virtual void anchor();
498
499 unsigned CurCycle = 0;
500 bool HasReadyFilter;
501
502 public:
503 SchedulingPriorityQueue(bool rf = false) : HasReadyFilter(rf) {}
504
505 virtual ~SchedulingPriorityQueue() = default;
506
507 virtual bool isBottomUp() const = 0;
508
509 virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
510 virtual void addNode(const SUnit *SU) = 0;
511 virtual void updateNode(const SUnit *SU) = 0;
512 virtual void releaseState() = 0;
513
514 virtual bool empty() const = 0;
515
516 bool hasReadyFilter() const { return HasReadyFilter; }
517
518 virtual bool tracksRegPressure() const { return false; }
519
520 virtual bool isReady(SUnit *) const {
521 assert(!HasReadyFilter && "The ready filter must override isReady()");
522 return true;
523 }
524
525 virtual void push(SUnit *U) = 0;
526
527 void push_all(const std::vector<SUnit *> &Nodes) {
528 for (SUnit *SU : Nodes)
529 push(U: SU);
530 }
531
532 virtual SUnit *pop() = 0;
533
534 virtual void remove(SUnit *SU) = 0;
535
536 virtual void dump(ScheduleDAG *) const {}
537
538 /// As each node is scheduled, this method is invoked. This allows the
539 /// priority function to adjust the priority of related unscheduled nodes,
540 /// for example.
541 virtual void scheduledNode(SUnit *) {}
542
543 virtual void unscheduledNode(SUnit *) {}
544
545 void setCurCycle(unsigned Cycle) {
546 CurCycle = Cycle;
547 }
548
549 unsigned getCurCycle() const {
550 return CurCycle;
551 }
552 };
553
554 class ScheduleDAG {
555 public:
556 const LLVMTargetMachine &TM; ///< Target processor
557 const TargetInstrInfo *TII; ///< Target instruction information
558 const TargetRegisterInfo *TRI; ///< Target processor register info
559 MachineFunction &MF; ///< Machine function
560 MachineRegisterInfo &MRI; ///< Virtual/real register map
561 std::vector<SUnit> SUnits; ///< The scheduling units.
562 SUnit EntrySU; ///< Special node for the region entry.
563 SUnit ExitSU; ///< Special node for the region exit.
564
565#ifdef NDEBUG
566 static const bool StressSched = false;
567#else
568 bool StressSched;
569#endif
570
571 // This class is designed to be passed by reference only. Copy constructor
572 // is declared as deleted here to make the derived classes have deleted
573 // implicit-declared copy constructor, which suppresses the warnings from
574 // static analyzer when the derived classes own resources that are freed in
575 // their destructors, but don't have user-written copy constructors (rule
576 // of three).
577 ScheduleDAG(const ScheduleDAG &) = delete;
578 ScheduleDAG &operator=(const ScheduleDAG &) = delete;
579
580 explicit ScheduleDAG(MachineFunction &mf);
581
582 virtual ~ScheduleDAG();
583
584 /// Clears the DAG state (between regions).
585 void clearDAG();
586
587 /// Returns the MCInstrDesc of this SUnit.
588 /// Returns NULL for SDNodes without a machine opcode.
589 const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
590 if (SU->isInstr()) return &SU->getInstr()->getDesc();
591 return getNodeDesc(Node: SU->getNode());
592 }
593
594 /// Pops up a GraphViz/gv window with the ScheduleDAG rendered using 'dot'.
595 virtual void viewGraph(const Twine &Name, const Twine &Title);
596 virtual void viewGraph();
597
598 virtual void dumpNode(const SUnit &SU) const = 0;
599 virtual void dump() const = 0;
600 void dumpNodeName(const SUnit &SU) const;
601
602 /// Returns a label for an SUnit node in a visualization of the ScheduleDAG.
603 virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
604
605 /// Returns a label for the region of code covered by the DAG.
606 virtual std::string getDAGName() const = 0;
607
608 /// Adds custom features for a visualization of the ScheduleDAG.
609 virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
610
611#ifndef NDEBUG
612 /// Verifies that all SUnits were scheduled and that their state is
613 /// consistent. Returns the number of scheduled SUnits.
614 unsigned VerifyScheduledDAG(bool isBottomUp);
615#endif
616
617 protected:
618 void dumpNodeAll(const SUnit &SU) const;
619
620 private:
621 /// Returns the MCInstrDesc of this SDNode or NULL.
622 const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
623 };
624
  /// Forward iterator over an SUnit's predecessor edges, dereferencing to the
  /// predecessor SUnit of each edge. Used by the GraphTraits specializations
  /// below to walk the scheduling DAG.
  class SUnitIterator {
    SUnit *Node;      ///< The node whose Preds list is being iterated.
    unsigned Operand; ///< Index of the current edge in Node->Preds.

    SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}

  public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = SUnit;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    // Note: only the operand index is compared; both iterators are assumed
    // to refer to the same node's predecessor list.
    bool operator==(const SUnitIterator& x) const {
      return Operand == x.Operand;
    }
    bool operator!=(const SUnitIterator& x) const { return !operator==(x); }

    /// Returns the predecessor SUnit at the current edge.
    pointer operator*() const {
      return Node->Preds[Operand].getSUnit();
    }
    pointer operator->() const { return operator*(); }

    SUnitIterator& operator++() { // Preincrement
      ++Operand;
      return *this;
    }
    SUnitIterator operator++(int) { // Postincrement
      SUnitIterator tmp = *this; ++*this; return tmp;
    }

    static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
    static SUnitIterator end  (SUnit *N) {
      return SUnitIterator(N, (unsigned)N->Preds.size());
    }

    /// Returns the index of the current predecessor edge.
    unsigned getOperand() const { return Operand; }
    /// Returns the node whose predecessors are iterated (not the pred itself).
    const SUnit *getNode() const { return Node; }

    /// Tests if this is not an SDep::Data dependence.
    bool isCtrlDep() const {
      return getSDep().isCtrl();
    }
    bool isArtificialDep() const {
      return getSDep().isArtificial();
    }
    /// Returns the current predecessor edge itself.
    const SDep &getSDep() const {
      return Node->Preds[Operand];
    }
  };
675
  /// GraphTraits specialization so generic graph algorithms (and GraphWriter)
  /// can traverse a scheduling DAG starting from an SUnit, following
  /// predecessor edges via SUnitIterator.
  template <> struct GraphTraits<SUnit*> {
    typedef SUnit *NodeRef;
    typedef SUnitIterator ChildIteratorType;
    static NodeRef getEntryNode(SUnit *N) { return N; }
    static ChildIteratorType child_begin(NodeRef N) {
      return SUnitIterator::begin(N);
    }
    static ChildIteratorType child_end(NodeRef N) {
      return SUnitIterator::end(N);
    }
  };
687
  /// GraphTraits specialization for a whole ScheduleDAG: node iteration covers
  /// the DAG's SUnits vector, while edge traversal is inherited from
  /// GraphTraits<SUnit*>.
  template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
    typedef pointer_iterator<std::vector<SUnit>::iterator> nodes_iterator;
    static nodes_iterator nodes_begin(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.begin());
    }
    static nodes_iterator nodes_end(ScheduleDAG *G) {
      return nodes_iterator(G->SUnits.end());
    }
  };
697
  /// This class can compute a topological ordering for SUnits and provides
  /// methods for dynamically updating the ordering as new edges are added.
  ///
  /// This allows a very fast implementation of IsReachable, for example.
  class ScheduleDAGTopologicalSort {
    /// A reference to the ScheduleDAG's SUnits.
    std::vector<SUnit> &SUnits;
    /// The region's exit node (not part of SUnits).
    SUnit *ExitSU;

    // Have any new nodes been added?
    bool Dirty = false;

    // Outstanding added edges, that have not been applied to the ordering.
    SmallVector<std::pair<SUnit *, SUnit *>, 16> Updates;

    /// Maps topological index to the node number.
    std::vector<int> Index2Node;
    /// Maps the node number to its topological index.
    std::vector<int> Node2Index;
    /// A set of nodes visited during a DFS traversal.
    BitVector Visited;

    /// Makes a DFS traversal and mark all nodes affected by the edge insertion.
    /// These nodes will later get new topological indexes by means of the Shift
    /// method.
    void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

    /// Reassigns topological indexes for the nodes in the DAG to
    /// preserve the topological ordering.
    void Shift(BitVector& Visited, int LowerBound, int UpperBound);

    /// Assigns the topological index to the node n.
    void Allocate(int n, int index);

    /// Fix the ordering, by either recomputing from scratch or by applying
    /// any outstanding updates. Uses a heuristic to estimate what will be
    /// cheaper.
    void FixOrder();

  public:
    ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);

    /// Add a SUnit without predecessors to the end of the topological order. It
    /// also must be the first new node added to the DAG.
    void AddSUnitWithoutPredecessors(const SUnit *SU);

    /// Creates the initial topological ordering from the DAG to be scheduled.
    void InitDAGTopologicalSorting();

    /// Returns an array of SUs that are both in the successor
    /// subtree of StartSU and in the predecessor subtree of TargetSU.
    /// StartSU and TargetSU are not in the array.
    /// Success is false if TargetSU is not in the successor subtree of
    /// StartSU, else it is true.
    std::vector<int> GetSubGraph(const SUnit &StartSU, const SUnit &TargetSU,
                                 bool &Success);

    /// Checks if \p SU is reachable from \p TargetSU.
    bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

    /// Returns true if addPred(TargetSU, SU) creates a cycle.
    bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);

    /// Updates the topological ordering to accommodate an edge to be
    /// added from SUnit \p X to SUnit \p Y.
    void AddPred(SUnit *Y, SUnit *X);

    /// Queues an update to the topological ordering to accommodate an edge to
    /// be added from SUnit \p X to SUnit \p Y.
    void AddPredQueued(SUnit *Y, SUnit *X);

    /// Updates the topological ordering to accommodate an edge to be
    /// removed from the specified node \p N from the predecessors of the
    /// current node \p M.
    void RemovePred(SUnit *M, SUnit *N);

    /// Mark the ordering as temporarily broken, after a new node has been
    /// added.
    void MarkDirty() { Dirty = true; }

    // Iteration over node numbers in topological order.
    typedef std::vector<int>::iterator iterator;
    typedef std::vector<int>::const_iterator const_iterator;
    iterator begin() { return Index2Node.begin(); }
    const_iterator begin() const { return Index2Node.begin(); }
    iterator end() { return Index2Node.end(); }
    const_iterator end() const { return Index2Node.end(); }

    typedef std::vector<int>::reverse_iterator reverse_iterator;
    typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
    reverse_iterator rbegin() { return Index2Node.rbegin(); }
    const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
    reverse_iterator rend() { return Index2Node.rend(); }
    const_reverse_iterator rend() const { return Index2Node.rend(); }
  };
792
793} // end namespace llvm
794
795#endif // LLVM_CODEGEN_SCHEDULEDAG_H
796

// Source: llvm/include/llvm/CodeGen/ScheduleDAG.h