//===- MCSchedule.cpp - Scheduling ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the default scheduling model.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCSchedule.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <optional>
#include <type_traits>

using namespace llvm;

static_assert(std::is_trivial_v<MCSchedModel>,
              "MCSchedModel is required to be a trivial type");
const MCSchedModel MCSchedModel::Default = {DefaultIssueWidth,
                                            DefaultMicroOpBufferSize,
                                            DefaultLoopMicroOpBufferSize,
                                            DefaultLoadLatency,
                                            DefaultHighLatency,
                                            DefaultMispredictPenalty,
                                            /*PostRAScheduler=*/false,
                                            /*CompleteModel=*/true,
                                            /*EnableIntervals=*/false,
                                            /*ProcID=*/0,
                                            /*ProcResourceTable=*/nullptr,
                                            /*SchedClassTable=*/nullptr,
                                            /*NumProcResourceKinds=*/0,
                                            /*NumSchedClasses=*/0,
                                            /*InstrItineraries=*/nullptr,
                                            /*ExtraProcessorInfo=*/nullptr};
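
// Computes the latency of a resolved (non-variant) scheduling class as the
// maximum cycle count over all of its write latency entries. If an entry
// reports an invalid (negative) latency, that value is returned as-is.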
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  int Latency = 0;
  for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
       DefIdx != DefEnd; ++DefIdx) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI.getWriteLatencyEntry(&SCDesc, DefIdx);
    // Early exit if we found an invalid latency.
    if (WLEntry->Cycles < 0)
      return WLEntry->Cycles;
    Latency = std::max(Latency, static_cast<int>(WLEntry->Cycles));
  }
  return Latency;
}
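
// Computes the latency for a scheduling class index. Invalid classes yield 0;
// variant classes cannot be resolved without an instruction and are reported
// as unreachable.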
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      unsigned SchedClass) const {
  const MCSchedClassDesc &SCDesc = *getSchedClassDesc(SchedClass);
  if (!SCDesc.isValid())
    return 0;
  if (!SCDesc.isVariant())
    return MCSchedModel::computeInstrLatency(STI, SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
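
// Computes the latency of an MCInst by looking up its scheduling class and
// resolving any variant classes against this processor model.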
int MCSchedModel::computeInstrLatency(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return 0;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::computeInstrLatency(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
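
// Computes the reciprocal throughput (cycles per instruction) of a resolved
// scheduling class from its most constrained processor resource, i.e. the
// minimum of NumUnits / ReleaseAtCycle over all consumed resources. Falls
// back to NumMicroOps / IssueWidth when no resource cycles are declared.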
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCSchedClassDesc &SCDesc) {
  std::optional<double> Throughput;
  const MCSchedModel &SM = STI.getSchedModel();
  const MCWriteProcResEntry *I = STI.getWriteProcResBegin(&SCDesc);
  const MCWriteProcResEntry *E = STI.getWriteProcResEnd(&SCDesc);
  for (; I != E; ++I) {
    if (!I->ReleaseAtCycle)
      continue;
    unsigned NumUnits = SM.getProcResource(I->ProcResourceIdx)->NumUnits;
    double Temp = NumUnits * 1.0 / I->ReleaseAtCycle;
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If no throughput value was calculated, assume that we can execute at the
  // maximum issue width scaled by number of micro-ops for the schedule class.
  return ((double)SCDesc.NumMicroOps) / SM.IssueWidth;
}
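
// Computes the reciprocal throughput of an MCInst by resolving its scheduling
// class (including variant classes) against this processor model.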
double
MCSchedModel::getReciprocalThroughput(const MCSubtargetInfo &STI,
                                      const MCInstrInfo &MCII,
                                      const MCInst &Inst) const {
  unsigned SchedClass = MCII.get(Inst.getOpcode()).getSchedClass();
  const MCSchedClassDesc *SCDesc = getSchedClassDesc(SchedClass);

  // If there's no valid class, assume that the instruction executes/completes
  // at the maximum issue width.
  if (!SCDesc->isValid())
    return 1.0 / IssueWidth;

  unsigned CPUID = getProcessorID();
  while (SCDesc->isVariant()) {
    SchedClass = STI.resolveVariantSchedClass(SchedClass, &Inst, &MCII, CPUID);
    SCDesc = getSchedClassDesc(SchedClass);
  }

  if (SchedClass)
    return MCSchedModel::getReciprocalThroughput(STI, *SCDesc);

  llvm_unreachable("unsupported variant scheduling class");
}
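
// Computes the reciprocal throughput of an itinerary-based scheduling class
// from its most constrained stage (fewest units per cycle of occupancy).
// Falls back to 1 / DefaultIssueWidth when no stage specifies any cycles.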
double
MCSchedModel::getReciprocalThroughput(unsigned SchedClass,
                                      const InstrItineraryData &IID) {
  std::optional<double> Throughput;
  const InstrStage *I = IID.beginStage(SchedClass);
  const InstrStage *E = IID.endStage(SchedClass);
  for (; I != E; ++I) {
    if (!I->getCycles())
      continue;
    double Temp = llvm::popcount(I->getUnits()) * 1.0 / I->getCycles();
    Throughput = Throughput ? std::min(*Throughput, Temp) : Temp;
  }
  if (Throughput)
    return 1.0 / *Throughput;

  // If there are no execution resources specified for this class, then assume
  // that it can execute at the maximum default issue width.
  return 1.0 / DefaultIssueWidth;
}
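
// Returns the maximum forwarding delay, in cycles, for reads that depend on
// writes of the given write resource. Entries with negative Cycles encode an
// extra delay, so the most negative matching entry determines the result.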
unsigned
MCSchedModel::getForwardingDelayCycles(ArrayRef<MCReadAdvanceEntry> Entries,
                                       unsigned WriteResourceID) {
  if (Entries.empty())
    return 0;

  int DelayCycles = 0;
  for (const MCReadAdvanceEntry &E : Entries) {
    if (E.WriteResourceID != WriteResourceID)
      continue;
    DelayCycles = std::min(DelayCycles, E.Cycles);
  }

  return std::abs(DelayCycles);
}