//===- InlineOrder.cpp - Inlining order abstraction -*- C++ ---*-----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineOrder.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineAdvisor.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

#define DEBUG_TYPE "inline-order"

enum class InlinePriorityMode : int { Size, Cost, CostBenefit, ML };

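// Priority mode used by the module inliner's priority queue; selectable on
// the opt command line, e.g. -inline-priority-mode=cost-benefit.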
static cl::opt<InlinePriorityMode> UseInlinePriority(
    "inline-priority-mode", cl::init(InlinePriorityMode::Size), cl::Hidden,
    cl::desc("Choose the priority mode to use in module inline"),
    cl::values(clEnumValN(InlinePriorityMode::Size, "size",
                          "Use callee size priority."),
               clEnumValN(InlinePriorityMode::Cost, "cost",
                          "Use inline cost priority."),
               clEnumValN(InlinePriorityMode::CostBenefit, "cost-benefit",
                          "Use cost-benefit ratio."),
               clEnumValN(InlinePriorityMode::ML, "ml", "Use ML.")));

static cl::opt<int> ModuleInlinerTopPriorityThreshold(
    "module-inliner-top-priority-threshold", cl::Hidden, cl::init(0),
    cl::desc("The cost threshold for call sites that get inlined without the "
             "cost-benefit analysis"));

namespace {

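// Compute the inline cost for \p CB using the analyses cached in \p FAM. The
// profile summary is looked up through the module analysis proxy, and the
// optimization remark emitter is forwarded only when missed-optimization
// remarks are enabled for this DEBUG_TYPE.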
llvm::InlineCost getInlineCostWrapper(CallBase &CB,
                                      FunctionAnalysisManager &FAM,
                                      const InlineParams &Params) {
  Function &Caller = *CB.getCaller();
  ProfileSummaryInfo *PSI =
      FAM.getResult<ModuleAnalysisManagerFunctionProxy>(Caller)
          .getCachedResult<ProfileSummaryAnalysis>(
              *CB.getParent()->getParent()->getParent());

  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(Caller);
  auto GetAssumptionCache = [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  auto GetBFI = [&](Function &F) -> BlockFrequencyInfo & {
    return FAM.getResult<BlockFrequencyAnalysis>(F);
  };
  auto GetTLI = [&](Function &F) -> const TargetLibraryInfo & {
    return FAM.getResult<TargetLibraryAnalysis>(F);
  };

  Function &Callee = *CB.getCalledFunction();
  auto &CalleeTTI = FAM.getResult<TargetIRAnalysis>(Callee);
  bool RemarksEnabled =
      Callee.getContext().getDiagHandlerPtr()->isMissedOptRemarkEnabled(
          DEBUG_TYPE);
  return getInlineCost(CB, Params, CalleeTTI, GetAssumptionCache, GetTLI,
                       GetBFI, PSI, RemarksEnabled ? &ORE : nullptr);
}

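// Prioritize call sites by the static instruction count of the callee:
// smaller callees are inlined first.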
class SizePriority {
public:
  SizePriority() = default;
  SizePriority(const CallBase *CB, FunctionAnalysisManager &,
               const InlineParams &) {
    Function *Callee = CB->getCalledFunction();
    Size = Callee->getInstructionCount();
  }

  static bool isMoreDesirable(const SizePriority &P1, const SizePriority &P2) {
    return P1.Size < P2.Size;
  }

private:
  unsigned Size = UINT_MAX;
};

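// Prioritize call sites by their inline cost: cheaper call sites are inlined
// first. "Always" results map to INT_MIN and "never" results map to INT_MAX.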
class CostPriority {
public:
  CostPriority() = default;
  CostPriority(const CallBase *CB, FunctionAnalysisManager &FAM,
               const InlineParams &Params) {
    auto IC = getInlineCostWrapper(const_cast<CallBase &>(*CB), FAM, Params);
    if (IC.isVariable())
      Cost = IC.getCost();
    else
      Cost = IC.isNever() ? INT_MAX : INT_MIN;
  }

  static bool isMoreDesirable(const CostPriority &P1, const CostPriority &P2) {
    return P1.Cost < P2.Cost;
  }

private:
  int Cost = INT_MAX;
};

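// Prioritize call sites using the cost-benefit analysis when it is available,
// falling back to the plain inline cost otherwise; see isMoreDesirable() for
// the exact ordering.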
class CostBenefitPriority {
public:
  CostBenefitPriority() = default;
  CostBenefitPriority(const CallBase *CB, FunctionAnalysisManager &FAM,
                      const InlineParams &Params) {
    auto IC = getInlineCostWrapper(const_cast<CallBase &>(*CB), FAM, Params);
    if (IC.isVariable())
      Cost = IC.getCost();
    else
      Cost = IC.isNever() ? INT_MAX : INT_MIN;
    StaticBonusApplied = IC.getStaticBonusApplied();
    CostBenefit = IC.getCostBenefit();
  }

  static bool isMoreDesirable(const CostBenefitPriority &P1,
                              const CostBenefitPriority &P2) {
    // We prioritize call sites in the dictionary order of the following
    // priorities:
    //
    // 1. Those call sites that are expected to reduce the caller size when
    //    inlined. Within them, we prioritize those call sites with bigger
    //    reduction.
    //
    // 2. Those call sites that have gone through the cost-benefit analysis.
    //    Currently, they are limited to hot call sites. Within them, we
    //    prioritize those call sites with higher benefit-to-cost ratios.
    //
    // 3. Remaining call sites are prioritized according to their costs.

    // We add back StaticBonusApplied to determine whether we expect the caller
    // to shrink (even if we don't delete the callee).
    bool P1ReducesCallerSize =
        P1.Cost + P1.StaticBonusApplied < ModuleInlinerTopPriorityThreshold;
    bool P2ReducesCallerSize =
        P2.Cost + P2.StaticBonusApplied < ModuleInlinerTopPriorityThreshold;
    if (P1ReducesCallerSize || P2ReducesCallerSize) {
      // If one reduces the caller size while the other doesn't, then return
      // true iff P1 reduces the caller size.
      if (P1ReducesCallerSize != P2ReducesCallerSize)
        return P1ReducesCallerSize;

      // If they both reduce the caller size, pick the one with the smaller
      // cost.
      return P1.Cost < P2.Cost;
    }

    bool P1HasCB = P1.CostBenefit.has_value();
    bool P2HasCB = P2.CostBenefit.has_value();
    if (P1HasCB || P2HasCB) {
      // If one has undergone the cost-benefit analysis while the other hasn't,
      // then return true iff P1 has.
      if (P1HasCB != P2HasCB)
        return P1HasCB;

      // If they have undergone the cost-benefit analysis, then pick the one
      // with a higher benefit-to-cost ratio.
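      // The ratios are compared without division: P1 is preferred iff
      // Benefit1 / Cost1 > Benefit2 / Cost2, i.e. iff
      // Benefit1 * Cost2 > Benefit2 * Cost1. For example, (Benefit=300,
      // Cost=100) beats (Benefit=500, Cost=200) since 300 * 200 > 500 * 100.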
      APInt LHS = P1.CostBenefit->getBenefit() * P2.CostBenefit->getCost();
      APInt RHS = P2.CostBenefit->getBenefit() * P1.CostBenefit->getCost();
      return LHS.ugt(RHS);
    }

    // Remaining call sites are ordered according to their costs.
    return P1.Cost < P2.Cost;
  }

private:
  int Cost = INT_MAX;
  int StaticBonusApplied = 0;
  std::optional<CostBenefitPair> CostBenefit;
};

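// Priority used by the ML mode; currently it orders call sites by their
// inline cost, the same way CostPriority does.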
class MLPriority {
public:
  MLPriority() = default;
  MLPriority(const CallBase *CB, FunctionAnalysisManager &FAM,
             const InlineParams &Params) {
    auto IC = getInlineCostWrapper(const_cast<CallBase &>(*CB), FAM, Params);
    if (IC.isVariable())
      Cost = IC.getCost();
    else
      Cost = IC.isNever() ? INT_MAX : INT_MIN;
  }

  static bool isMoreDesirable(const MLPriority &P1, const MLPriority &P2) {
    return P1.Cost < P2.Cost;
  }

private:
  int Cost = INT_MAX;
};

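// A worklist of call sites kept as a heap via std::push_heap / std::pop_heap,
// so that the most desirable call site (as defined by
// PriorityT::isMoreDesirable) is popped first. Priorities are refreshed
// lazily in pop_heap_adjust() because earlier inlining can make a queued call
// site less desirable.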
template <typename PriorityT>
class PriorityInlineOrder : public InlineOrder<std::pair<CallBase *, int>> {
  using T = std::pair<CallBase *, int>;

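  // Heap comparator: return true when L is less desirable than R, so that the
  // most desirable call site stays at the front of the heap.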
  bool hasLowerPriority(const CallBase *L, const CallBase *R) const {
    const auto I1 = Priorities.find(L);
    const auto I2 = Priorities.find(R);
    assert(I1 != Priorities.end() && I2 != Priorities.end());
    return PriorityT::isMoreDesirable(I2->second, I1->second);
  }

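  // Recompute the priority of \p CB and return true if it became less
  // desirable than the previously cached value.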
  bool updateAndCheckDecreased(const CallBase *CB) {
    auto It = Priorities.find(CB);
    const auto OldPriority = It->second;
    It->second = PriorityT(CB, FAM, Params);
    const auto NewPriority = It->second;
    return PriorityT::isMoreDesirable(OldPriority, NewPriority);
  }

  // A call site could become less desirable for inlining because of the size
  // growth from prior inlining into the callee. This method is used to lazily
  // update the desirability of a call site if it's decreasing. It is only
  // called on pop(), not every time the desirability changes. When the
  // desirability of the front call site decreases, an updated one would be
  // pushed right back into the heap. For simplicity, those cases where the
  // desirability of a call site increases are ignored here.
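  // For example, if inlining into a callee F made F bigger, queued call sites
  // that call F may now be more expensive; the first time one of them reaches
  // the top of the heap, its priority is refreshed and it is re-heapified
  // instead of being returned.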
  void pop_heap_adjust() {
    std::pop_heap(Heap.begin(), Heap.end(), isLess);
    while (updateAndCheckDecreased(Heap.back())) {
      std::push_heap(Heap.begin(), Heap.end(), isLess);
      std::pop_heap(Heap.begin(), Heap.end(), isLess);
    }
  }

public:
  PriorityInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params)
      : FAM(FAM), Params(Params) {
    isLess = [&](const CallBase *L, const CallBase *R) {
      return hasLowerPriority(L, R);
    };
  }

  size_t size() override { return Heap.size(); }

  void push(const T &Elt) override {
    CallBase *CB = Elt.first;
    const int InlineHistoryID = Elt.second;

    Heap.push_back(CB);
    Priorities[CB] = PriorityT(CB, FAM, Params);
    std::push_heap(Heap.begin(), Heap.end(), isLess);
    InlineHistoryMap[CB] = InlineHistoryID;
  }

  T pop() override {
    assert(size() > 0);
    pop_heap_adjust();

    CallBase *CB = Heap.pop_back_val();
    T Result = std::make_pair(CB, InlineHistoryMap[CB]);
    InlineHistoryMap.erase(CB);
    return Result;
  }

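  // Remove every queued call site for which Pred returns true, then restore
  // the heap invariant over the remaining elements.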
  void erase_if(function_ref<bool(T)> Pred) override {
    auto PredWrapper = [=](CallBase *CB) -> bool {
      return Pred(std::make_pair(CB, InlineHistoryMap[CB]));
    };
    llvm::erase_if(Heap, PredWrapper);
    std::make_heap(Heap.begin(), Heap.end(), isLess);
  }

private:
  SmallVector<CallBase *, 16> Heap;
  std::function<bool(const CallBase *L, const CallBase *R)> isLess;
  DenseMap<CallBase *, int> InlineHistoryMap;
  DenseMap<const CallBase *, PriorityT> Priorities;
  FunctionAnalysisManager &FAM;
  const InlineParams &Params;
};

} // namespace

AnalysisKey llvm::PluginInlineOrderAnalysis::Key;
bool llvm::PluginInlineOrderAnalysis::HasBeenRegistered;

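// Build the priority queue selected by -inline-priority-mode. MAM and M are
// unused in the default implementation.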
std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
llvm::getDefaultInlineOrder(FunctionAnalysisManager &FAM,
                            const InlineParams &Params,
                            ModuleAnalysisManager &MAM, Module &M) {
  switch (UseInlinePriority) {
  case InlinePriorityMode::Size:
    LLVM_DEBUG(dbgs() << " Current used priority: Size priority ---- \n");
    return std::make_unique<PriorityInlineOrder<SizePriority>>(FAM, Params);

  case InlinePriorityMode::Cost:
    LLVM_DEBUG(dbgs() << " Current used priority: Cost priority ---- \n");
    return std::make_unique<PriorityInlineOrder<CostPriority>>(FAM, Params);

  case InlinePriorityMode::CostBenefit:
    LLVM_DEBUG(
        dbgs() << " Current used priority: cost-benefit priority ---- \n");
    return std::make_unique<PriorityInlineOrder<CostBenefitPriority>>(FAM,
                                                                      Params);
  case InlinePriorityMode::ML:
    LLVM_DEBUG(dbgs() << " Current used priority: ML priority ---- \n");
    return std::make_unique<PriorityInlineOrder<MLPriority>>(FAM, Params);
  }
  return nullptr;
}

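// Return the inline order to use: a plugin-provided order if one has been
// registered through PluginInlineOrderAnalysis, otherwise the default order
// selected by -inline-priority-mode.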
std::unique_ptr<InlineOrder<std::pair<CallBase *, int>>>
llvm::getInlineOrder(FunctionAnalysisManager &FAM, const InlineParams &Params,
                     ModuleAnalysisManager &MAM, Module &M) {
  if (llvm::PluginInlineOrderAnalysis::isRegistered()) {
    LLVM_DEBUG(dbgs() << " Current used priority: plugin ---- \n");
    return MAM.getResult<PluginInlineOrderAnalysis>(M).Factory(FAM, Params, MAM,
                                                               M);
  }
  return getDefaultInlineOrder(FAM, Params, MAM, M);
}
