1//===- CodeExtractor.cpp - Pull code region into a new function -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interface to tear out a code region, such as an
10// individual loop or a parallel section, into a new function, replacing it with
11// a call to the new function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/CodeExtractor.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SetVector.h"
20#include "llvm/ADT/SmallPtrSet.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/BlockFrequencyInfo.h"
24#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
25#include "llvm/Analysis/BranchProbabilityInfo.h"
26#include "llvm/Analysis/LoopInfo.h"
27#include "llvm/IR/Argument.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/Constants.h"
33#include "llvm/IR/DIBuilder.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DebugInfo.h"
36#include "llvm/IR/DebugInfoMetadata.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/GlobalValue.h"
41#include "llvm/IR/InstIterator.h"
42#include "llvm/IR/InstrTypes.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/PatternMatch.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/User.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/Verifier.h"
55#include "llvm/Support/BlockFrequency.h"
56#include "llvm/Support/BranchProbability.h"
57#include "llvm/Support/Casting.h"
58#include "llvm/Support/CommandLine.h"
59#include "llvm/Support/Debug.h"
60#include "llvm/Support/ErrorHandling.h"
61#include "llvm/Support/raw_ostream.h"
62#include "llvm/Transforms/Utils/BasicBlockUtils.h"
63#include <cassert>
64#include <cstdint>
65#include <iterator>
66#include <map>
67#include <utility>
68#include <vector>
69
70using namespace llvm;
71using namespace llvm::PatternMatch;
72using ProfileCount = Function::ProfileCount;
73
74#define DEBUG_TYPE "code-extractor"
75
// Provide a command-line option to aggregate function arguments into a struct
// for functions produced by the code extractor. This is useful when converting
// extracted functions to pthread-based code, as only one argument (void*) can
// be passed in to pthread_create().
// Note: this flag is OR'ed with the AggregateArgs constructor parameter in
// both CodeExtractor constructors below, so it can only force aggregation on.
static cl::opt<bool>
AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
                 cl::desc("Aggregate arguments to code-extracted functions"));
83
/// Test whether a block is valid for extraction.
///
/// \p Result is the full set of blocks being extracted; \p AllowVarArgs and
/// \p AllowAlloca permit va_start calls and allocas respectively. Returns
/// false if moving \p BB into a new function could change semantics: the
/// block's address is taken, a BlockAddress is referenced, an EH construct
/// spans the region boundary, or a disallowed instruction is present.
static bool isBlockValidForExtraction(const BasicBlock &BB,
                                      const SetVector<BasicBlock *> &Result,
                                      bool AllowVarArgs, bool AllowAlloca) {
  // Taking the address of a basic block moved to another function is illegal.
  if (BB.hasAddressTaken())
    return false;

  // Don't hoist code that uses another basic block's address, as it's likely
  // to lead to unexpected behavior, like cross-function jumps. Walk the
  // transitive operand graph of every instruction in the block looking for
  // any reference to a BlockAddress.
  SmallPtrSet<User const *, 16> Visited;
  SmallVector<User const *, 16> ToVisit;

  for (Instruction const &Inst : BB)
    ToVisit.push_back(Elt: &Inst);

  while (!ToVisit.empty()) {
    User const *Curr = ToVisit.pop_back_val();
    if (!Visited.insert(Ptr: Curr).second)
      continue;
    if (isa<BlockAddress const>(Val: Curr))
      return false; // even a reference to self is likely to be not compatible

    // Instructions belonging to other blocks are handled when those blocks
    // are processed; don't walk through their operands from here.
    if (isa<Instruction>(Val: Curr) && cast<Instruction>(Val: Curr)->getParent() != &BB)
      continue;

    for (auto const &U : Curr->operands()) {
      if (auto *UU = dyn_cast<User>(Val: U))
        ToVisit.push_back(Elt: UU);
    }
  }

  // If explicitly requested, allow vastart and alloca. For invoke instructions
  // verify that extraction is valid.
  for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
    if (isa<AllocaInst>(Val: I)) {
      if (!AllowAlloca)
        return false;
      continue;
    }

    if (const auto *II = dyn_cast<InvokeInst>(Val&: I)) {
      // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
      // must be a part of the subgraph which is being extracted.
      if (auto *UBB = II->getUnwindDest())
        if (!Result.count(key: UBB))
          return false;
      continue;
    }

    // All catch handlers of a catchswitch instruction as well as the unwind
    // destination must be in the subgraph.
    if (const auto *CSI = dyn_cast<CatchSwitchInst>(Val&: I)) {
      if (auto *UBB = CSI->getUnwindDest())
        if (!Result.count(key: UBB))
          return false;
      for (const auto *HBB : CSI->handlers())
        if (!Result.count(key: const_cast<BasicBlock*>(HBB)))
          return false;
      continue;
    }

    // Make sure that entire catch handler is within subgraph. It is sufficient
    // to check that catch return's block is in the list.
    if (const auto *CPI = dyn_cast<CatchPadInst>(Val&: I)) {
      for (const auto *U : CPI->users())
        if (const auto *CRI = dyn_cast<CatchReturnInst>(Val: U))
          if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
            return false;
      continue;
    }

    // And do similar checks for cleanup handler - the entire handler must be
    // in subgraph which is going to be extracted. For cleanup return should
    // additionally check that the unwind destination is also in the subgraph.
    if (const auto *CPI = dyn_cast<CleanupPadInst>(Val&: I)) {
      for (const auto *U : CPI->users())
        if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val: U))
          if (!Result.count(key: const_cast<BasicBlock*>(CRI->getParent())))
            return false;
      continue;
    }
    if (const auto *CRI = dyn_cast<CleanupReturnInst>(Val&: I)) {
      if (auto *UBB = CRI->getUnwindDest())
        if (!Result.count(key: UBB))
          return false;
      continue;
    }

    if (const CallInst *CI = dyn_cast<CallInst>(Val&: I)) {
      if (const Function *F = CI->getCalledFunction()) {
        auto IID = F->getIntrinsicID();
        if (IID == Intrinsic::vastart) {
          if (AllowVarArgs)
            continue;
          else
            return false;
        }

        // Currently, we miscompile outlined copies of eh_typid_for. There are
        // proposals for fixing this in llvm.org/PR39545.
        if (IID == Intrinsic::eh_typeid_for)
          return false;
      }
    }
  }

  return true;
}
193
/// Build a set of blocks to extract if the input blocks are viable.
///
/// Drops blocks unreachable from entry (per \p DT), validates each remaining
/// block with isBlockValidForExtraction(), and enforces the single-entry
/// property: only the first block may have predecessors outside the region.
/// Returns an empty set if any check fails.
static SetVector<BasicBlock *>
buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                        bool AllowVarArgs, bool AllowAlloca) {
  assert(!BBs.empty() && "The set of blocks to extract must be non-empty");
  SetVector<BasicBlock *> Result;

  // Loop over the blocks, adding them to our set-vector, and aborting with an
  // empty set if we encounter invalid blocks.
  for (BasicBlock *BB : BBs) {
    // If this block is dead, don't process it.
    if (DT && !DT->isReachableFromEntry(A: BB))
      continue;

    // The input list must not contain duplicates.
    if (!Result.insert(X: BB))
      llvm_unreachable("Repeated basic blocks in extraction input");
  }

  LLVM_DEBUG(dbgs() << "Region front block: " << Result.front()->getName()
                    << '\n');

  for (auto *BB : Result) {
    if (!isBlockValidForExtraction(BB: *BB, Result, AllowVarArgs, AllowAlloca))
      return {};

    // Make sure that the first block is not a landing pad.
    if (BB == Result.front()) {
      if (BB->isEHPad()) {
        LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n");
        return {};
      }
      continue;
    }

    // All blocks other than the first must not have predecessors outside of
    // the subgraph which is being extracted.
    for (auto *PBB : predecessors(BB))
      if (!Result.count(key: PBB)) {
        LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "
                             "outside the region except for the first block!\n"
                          << "Problematic source BB: " << BB->getName() << "\n"
                          << "Problematic destination BB: " << PBB->getName()
                          << "\n");
        return {};
      }
  }

  return Result;
}
243
/// Construct a CodeExtractor over an explicit list of blocks.
///
/// The block list is validated up front via buildExtractionBlockSet(); on
/// failure, Blocks is left empty and isEligible() will return false.
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
                             bool AggregateArgs, BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, AssumptionCache *AC,
                             bool AllowVarArgs, bool AllowAlloca,
                             BasicBlock *AllocationBlock, std::string Suffix,
                             bool ArgsInZeroAddressSpace)
    // The -aggregate-extracted-args flag can force aggregation on.
    : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AC(AC), AllocationBlock(AllocationBlock),
      AllowVarArgs(AllowVarArgs),
      Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)),
      Suffix(Suffix), ArgsInZeroAddressSpace(ArgsInZeroAddressSpace) {}
255
/// Construct a CodeExtractor over all blocks of loop \p L.
///
/// Loop extraction never permits va_start or allocas inside the region, and
/// no dedicated allocation block is used.
CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
                             BlockFrequencyInfo *BFI,
                             BranchProbabilityInfo *BPI, AssumptionCache *AC,
                             std::string Suffix)
    : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
      BPI(BPI), AC(AC), AllocationBlock(nullptr), AllowVarArgs(false),
      Blocks(buildExtractionBlockSet(BBs: L.getBlocks(), DT: &DT,
                                     /* AllowVarArgs */ false,
                                     /* AllowAlloca */ false)),
      Suffix(Suffix) {}
266
267/// definedInRegion - Return true if the specified value is defined in the
268/// extracted region.
269static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
270 if (Instruction *I = dyn_cast<Instruction>(Val: V))
271 if (Blocks.count(key: I->getParent()))
272 return true;
273 return false;
274}
275
276/// definedInCaller - Return true if the specified value is defined in the
277/// function being code extracted, but not in the region being extracted.
278/// These values must be passed in as live-ins to the function.
279static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
280 if (isa<Argument>(Val: V)) return true;
281 if (Instruction *I = dyn_cast<Instruction>(Val: V))
282 if (!Blocks.count(key: I->getParent()))
283 return true;
284 return false;
285}
286
287static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
288 BasicBlock *CommonExitBlock = nullptr;
289 auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
290 for (auto *Succ : successors(BB: Block)) {
291 // Internal edges, ok.
292 if (Blocks.count(key: Succ))
293 continue;
294 if (!CommonExitBlock) {
295 CommonExitBlock = Succ;
296 continue;
297 }
298 if (CommonExitBlock != Succ)
299 return true;
300 }
301 return false;
302 };
303
304 if (any_of(Range: Blocks, P: hasNonCommonExitSucc))
305 return nullptr;
306
307 return CommonExitBlock;
308}
309
310CodeExtractorAnalysisCache::CodeExtractorAnalysisCache(Function &F) {
311 for (BasicBlock &BB : F) {
312 for (Instruction &II : BB.instructionsWithoutDebug())
313 if (auto *AI = dyn_cast<AllocaInst>(Val: &II))
314 Allocas.push_back(Elt: AI);
315
316 findSideEffectInfoForBlock(BB);
317 }
318}
319
/// Classify the memory behavior of every non-debug instruction in \p BB and
/// record the result in this cache.
///
/// Blocks that may touch memory through anything other than a direct alloca
/// (or that otherwise have side effects) go into SideEffectingBlocks; for
/// alloca-based loads/stores the alloca base is recorded in BaseMemAddrs so
/// doesBlockContainClobberOfAddr() can answer per-address queries.
void CodeExtractorAnalysisCache::findSideEffectInfoForBlock(BasicBlock &BB) {
  for (Instruction &II : BB.instructionsWithoutDebug()) {
    unsigned Opcode = II.getOpcode();
    Value *MemAddr = nullptr;
    switch (Opcode) {
    case Instruction::Store:
    case Instruction::Load: {
      if (Opcode == Instruction::Store) {
        StoreInst *SI = cast<StoreInst>(Val: &II);
        MemAddr = SI->getPointerOperand();
      } else {
        LoadInst *LI = cast<LoadInst>(Val: &II);
        MemAddr = LI->getPointerOperand();
      }
      // Global variable can not be aliased with locals.
      if (isa<Constant>(Val: MemAddr))
        break;
      Value *Base = MemAddr->stripInBoundsConstantOffsets();
      if (!isa<AllocaInst>(Val: Base)) {
        // Access through an unknown base: give up on this block entirely.
        SideEffectingBlocks.insert(V: &BB);
        return;
      }
      BaseMemAddrs[&BB].insert(V: Base);
      break;
    }
    default: {
      IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: &II);
      if (IntrInst) {
        // Lifetime markers are harmless; any other intrinsic is assumed to
        // have side effects.
        if (IntrInst->isLifetimeStartOrEnd())
          break;
        SideEffectingBlocks.insert(V: &BB);
        return;
      }
      // Treat all the other cases conservatively if it has side effects.
      if (II.mayHaveSideEffects()) {
        SideEffectingBlocks.insert(V: &BB);
        return;
      }
    }
    }
  }
}
362
363bool CodeExtractorAnalysisCache::doesBlockContainClobberOfAddr(
364 BasicBlock &BB, AllocaInst *Addr) const {
365 if (SideEffectingBlocks.count(V: &BB))
366 return true;
367 auto It = BaseMemAddrs.find(Val: &BB);
368 if (It != BaseMemAddrs.end())
369 return It->second.count(V: Addr);
370 return false;
371}
372
373bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
374 const CodeExtractorAnalysisCache &CEAC, Instruction *Addr) const {
375 AllocaInst *AI = cast<AllocaInst>(Val: Addr->stripInBoundsConstantOffsets());
376 Function *Func = (*Blocks.begin())->getParent();
377 for (BasicBlock &BB : *Func) {
378 if (Blocks.count(key: &BB))
379 continue;
380 if (CEAC.doesBlockContainClobberOfAddr(BB, Addr: AI))
381 return false;
382 }
383 return true;
384}
385
/// Return a block into which lifetime.end markers can be hoisted.
///
/// If \p CommonExitBlock has exactly one predecessor inside the region, that
/// predecessor is returned directly. Otherwise CommonExitBlock is split at
/// its first non-PHI instruction, all out-of-region predecessor edges are
/// redirected to the split-off tail, and the remaining stub is absorbed into
/// the region so that it becomes the hoisting point.
BasicBlock *
CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
  BasicBlock *SinglePredFromOutlineRegion = nullptr;
  assert(!Blocks.count(CommonExitBlock) &&
         "Expect a block outside the region!");
  // Look for a unique in-region predecessor of the exit block.
  for (auto *Pred : predecessors(BB: CommonExitBlock)) {
    if (!Blocks.count(key: Pred))
      continue;
    if (!SinglePredFromOutlineRegion) {
      SinglePredFromOutlineRegion = Pred;
    } else if (SinglePredFromOutlineRegion != Pred) {
      SinglePredFromOutlineRegion = nullptr;
      break;
    }
  }

  if (SinglePredFromOutlineRegion)
    return SinglePredFromOutlineRegion;

#ifndef NDEBUG
  // NOTE(review): both branches of this loop body break on the first
  // iteration and the iterator is never advanced, so this effectively only
  // tests whether the block's first instruction is a PHI.
  auto getFirstPHI = [](BasicBlock *BB) {
    BasicBlock::iterator I = BB->begin();
    PHINode *FirstPhi = nullptr;
    while (I != BB->end()) {
      PHINode *Phi = dyn_cast<PHINode>(Val&: I);
      if (!Phi)
        break;
      if (!FirstPhi) {
        FirstPhi = Phi;
        break;
      }
    }
    return FirstPhi;
  };
  // If there are any phi nodes, the single pred either exists or has already
  // be created before code extraction.
  assert(!getFirstPHI(CommonExitBlock) && "Phi not expected");
#endif

  // Split off everything from the first non-PHI instruction onwards; the
  // remaining stub (still named CommonExitBlock) will join the region.
  BasicBlock *NewExitBlock = CommonExitBlock->splitBasicBlock(
      I: CommonExitBlock->getFirstNonPHI()->getIterator());

  // Redirect all out-of-region predecessors past the stub to the new exit.
  for (BasicBlock *Pred :
       llvm::make_early_inc_range(Range: predecessors(BB: CommonExitBlock))) {
    if (Blocks.count(key: Pred))
      continue;
    Pred->getTerminator()->replaceUsesOfWith(From: CommonExitBlock, To: NewExitBlock);
  }
  // Now add the old exit block to the outline region.
  Blocks.insert(X: CommonExitBlock);
  OldTargets.push_back(Elt: NewExitBlock);
  return CommonExitBlock;
}
439
// Find the pair of lifetime markers for address 'Addr' that are either
// defined inside the outline region or can legally be shrinkwrapped into the
// outline region. If there are no other untracked uses of the address, return
// the pair of markers if found; otherwise return a pair of nullptr.
CodeExtractor::LifetimeMarkerInfo
CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
                                  Instruction *Addr,
                                  BasicBlock *ExitBlock) const {
  LifetimeMarkerInfo Info;

  // Scan all users of the address, classifying lifetime markers and bailing
  // out on any other use that lives outside the region.
  for (User *U : Addr->users()) {
    IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: U);
    if (IntrInst) {
      // We don't model addresses with multiple start/end markers, but the
      // markers do not need to be in the region.
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
        if (Info.LifeStart)
          return {};
        Info.LifeStart = IntrInst;
        continue;
      }
      if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
        if (Info.LifeEnd)
          return {};
        Info.LifeEnd = IntrInst;
        continue;
      }
      // At this point, permit debug uses outside of the region.
      // This is fixed in a later call to fixupDebugInfoPostExtraction().
      if (isa<DbgInfoIntrinsic>(Val: IntrInst))
        continue;
    }
    // Find untracked uses of the address, bail.
    if (!definedInRegion(Blocks, V: U))
      return {};
  }

  // Both markers must be present to shrinkwrap anything.
  if (!Info.LifeStart || !Info.LifeEnd)
    return {};

  // Markers outside the region must be moved: start sinks in, end hoists out.
  Info.SinkLifeStart = !definedInRegion(Blocks, V: Info.LifeStart);
  Info.HoistLifeEnd = !definedInRegion(Blocks, V: Info.LifeEnd);
  // Do legality check.
  if ((Info.SinkLifeStart || Info.HoistLifeEnd) &&
      !isLegalToShrinkwrapLifetimeMarkers(CEAC, Addr))
    return {};

  // Check to see if we have a place to do hoisting, if not, bail.
  if (Info.HoistLifeEnd && !ExitBlock)
    return {};

  return Info;
}
493
/// Collect allocas (and bitcasts of them) that can be sunk into the extracted
/// region.
///
/// \p SinkCands receives values to move into the region, \p HoistCands
/// receives lifetime.end markers to hoist towards \p ExitBlock, and
/// \p ExitBlock is set to the region's common exit block (null if there is
/// more than one).
void CodeExtractor::findAllocas(const CodeExtractorAnalysisCache &CEAC,
                                ValueSet &SinkCands, ValueSet &HoistCands,
                                BasicBlock *&ExitBlock) const {
  Function *Func = (*Blocks.begin())->getParent();
  ExitBlock = getCommonExitBlock(Blocks);

  // Record a verified marker pair in the candidate sets; returns false when
  // LMI does not describe a usable pair (no LifeStart).
  auto moveOrIgnoreLifetimeMarkers =
      [&](const LifetimeMarkerInfo &LMI) -> bool {
    if (!LMI.LifeStart)
      return false;
    if (LMI.SinkLifeStart) {
      LLVM_DEBUG(dbgs() << "Sinking lifetime.start: " << *LMI.LifeStart
                        << "\n");
      SinkCands.insert(X: LMI.LifeStart);
    }
    if (LMI.HoistLifeEnd) {
      LLVM_DEBUG(dbgs() << "Hoisting lifetime.end: " << *LMI.LifeEnd << "\n");
      HoistCands.insert(X: LMI.LifeEnd);
    }
    return true;
  };

  // Look up allocas in the original function in CodeExtractorAnalysisCache, as
  // this is much faster than walking all the instructions.
  for (AllocaInst *AI : CEAC.getAllocas()) {
    BasicBlock *BB = AI->getParent();
    if (Blocks.count(key: BB))
      continue;

    // As a prior call to extractCodeRegion() may have shrinkwrapped the alloca,
    // check whether it is actually still in the original function.
    Function *AIFunc = BB->getParent();
    if (AIFunc != Func)
      continue;

    LifetimeMarkerInfo MarkerInfo = getLifetimeMarkers(CEAC, Addr: AI, ExitBlock);
    bool Moved = moveOrIgnoreLifetimeMarkers(MarkerInfo);
    if (Moved) {
      LLVM_DEBUG(dbgs() << "Sinking alloca: " << *AI << "\n");
      SinkCands.insert(X: AI);
      continue;
    }

    // Find bitcasts in the outlined region that have lifetime marker users
    // outside that region. Replace the lifetime marker use with an
    // outside region bitcast to avoid unnecessary alloca/reload instructions
    // and extra lifetime markers.
    SmallVector<Instruction *, 2> LifetimeBitcastUsers;
    for (User *U : AI->users()) {
      if (!definedInRegion(Blocks, V: U))
        continue;

      if (U->stripInBoundsConstantOffsets() != AI)
        continue;

      Instruction *Bitcast = cast<Instruction>(Val: U);
      for (User *BU : Bitcast->users()) {
        IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(Val: BU);
        if (!IntrInst)
          continue;

        if (!IntrInst->isLifetimeStartOrEnd())
          continue;

        if (definedInRegion(Blocks, V: IntrInst))
          continue;

        LLVM_DEBUG(dbgs() << "Replace use of extracted region bitcast"
                          << *Bitcast << " in out-of-region lifetime marker "
                          << *IntrInst << "\n");
        LifetimeBitcastUsers.push_back(Elt: IntrInst);
      }
    }

    // Rewrite each such out-of-region marker to use a fresh pointer cast
    // inserted right before it.
    for (Instruction *I : LifetimeBitcastUsers) {
      Module *M = AIFunc->getParent();
      LLVMContext &Ctx = M->getContext();
      auto *Int8PtrTy = PointerType::getUnqual(C&: Ctx);
      CastInst *CastI =
          CastInst::CreatePointerCast(S: AI, Ty: Int8PtrTy, Name: "lt.cast", InsertBefore: I->getIterator());
      I->replaceUsesOfWith(From: I->getOperand(i: 1), To: CastI);
    }

    // Follow any bitcasts.
    SmallVector<Instruction *, 2> Bitcasts;
    SmallVector<LifetimeMarkerInfo, 2> BitcastLifetimeInfo;
    for (User *U : AI->users()) {
      if (U->stripInBoundsConstantOffsets() == AI) {
        Instruction *Bitcast = cast<Instruction>(Val: U);
        LifetimeMarkerInfo LMI = getLifetimeMarkers(CEAC, Addr: Bitcast, ExitBlock);
        if (LMI.LifeStart) {
          Bitcasts.push_back(Elt: Bitcast);
          BitcastLifetimeInfo.push_back(Elt: LMI);
          continue;
        }
      }

      // Found unknown use of AI.
      if (!definedInRegion(Blocks, V: U)) {
        Bitcasts.clear();
        break;
      }
    }

    // Either no bitcasts reference the alloca or there are unknown uses.
    if (Bitcasts.empty())
      continue;

    LLVM_DEBUG(dbgs() << "Sinking alloca (via bitcast): " << *AI << "\n");
    SinkCands.insert(X: AI);
    for (unsigned I = 0, E = Bitcasts.size(); I != E; ++I) {
      Instruction *BitcastAddr = Bitcasts[I];
      const LifetimeMarkerInfo &LMI = BitcastLifetimeInfo[I];
      assert(LMI.LifeStart &&
             "Unsafe to sink bitcast without lifetime markers");
      moveOrIgnoreLifetimeMarkers(LMI);
      if (!definedInRegion(Blocks, V: BitcastAddr)) {
        LLVM_DEBUG(dbgs() << "Sinking bitcast-of-alloca: " << *BitcastAddr
                          << "\n");
        SinkCands.insert(X: BitcastAddr);
      }
    }
  }
}
618
619bool CodeExtractor::isEligible() const {
620 if (Blocks.empty())
621 return false;
622 BasicBlock *Header = *Blocks.begin();
623 Function *F = Header->getParent();
624
625 // For functions with varargs, check that varargs handling is only done in the
626 // outlined function, i.e vastart and vaend are only used in outlined blocks.
627 if (AllowVarArgs && F->getFunctionType()->isVarArg()) {
628 auto containsVarArgIntrinsic = [](const Instruction &I) {
629 if (const CallInst *CI = dyn_cast<CallInst>(&I))
630 if (const Function *Callee = CI->getCalledFunction())
631 return Callee->getIntrinsicID() == Intrinsic::vastart ||
632 Callee->getIntrinsicID() == Intrinsic::vaend;
633 return false;
634 };
635
636 for (auto &BB : *F) {
637 if (Blocks.count(key: &BB))
638 continue;
639 if (llvm::any_of(Range&: BB, P: containsVarArgIntrinsic))
640 return false;
641 }
642 }
643 return true;
644}
645
646void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
647 const ValueSet &SinkCands) const {
648 for (BasicBlock *BB : Blocks) {
649 // If a used value is defined outside the region, it's an input. If an
650 // instruction is used outside the region, it's an output.
651 for (Instruction &II : *BB) {
652 for (auto &OI : II.operands()) {
653 Value *V = OI;
654 if (!SinkCands.count(key: V) && definedInCaller(Blocks, V))
655 Inputs.insert(X: V);
656 }
657
658 for (User *U : II.users())
659 if (!definedInRegion(Blocks, V: U)) {
660 Outputs.insert(X: &II);
661 break;
662 }
663 }
664 }
665}
666
/// severSplitPHINodesOfEntry - If a PHI node has multiple inputs from outside
/// of the region, we need to split the entry block of the region so that the
/// PHI node is easier to deal with.
///
/// After splitting, \p Header is updated to point at the new (in-region)
/// block; the old block stays outside the region and keeps the PHIs that
/// merge out-of-region values.
void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
  unsigned NumPredsFromRegion = 0;
  unsigned NumPredsOutsideRegion = 0;

  if (Header != &Header->getParent()->getEntryBlock()) {
    PHINode *PN = dyn_cast<PHINode>(Val: Header->begin());
    if (!PN) return; // No PHI nodes.

    // If the header node contains any PHI nodes, check to see if there is more
    // than one entry from outside the region. If so, we need to sever the
    // header block into two.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(key: PN->getIncomingBlock(i)))
        ++NumPredsFromRegion;
      else
        ++NumPredsOutsideRegion;

    // If there is one (or fewer) predecessor from outside the region, we don't
    // need to do anything special.
    if (NumPredsOutsideRegion <= 1) return;
  }

  // Otherwise, we need to split the header block into two pieces: one
  // containing PHI nodes merging values from outside of the region, and a
  // second that contains all of the code for the block and merges back any
  // incoming values from inside of the region.
  BasicBlock *NewBB = SplitBlock(Old: Header, SplitPt: Header->getFirstNonPHI(), DT);

  // We only want to code extract the second block now, and it becomes the new
  // header of the region.
  BasicBlock *OldPred = Header;
  Blocks.remove(X: OldPred);
  Blocks.insert(X: NewBB);
  Header = NewBB;

  // Okay, now we need to adjust the PHI nodes and any branches from within the
  // region to go to the new header block instead of the old header block.
  if (NumPredsFromRegion) {
    PHINode *PN = cast<PHINode>(Val: OldPred->begin());
    // Loop over all of the predecessors of OldPred that are in the region,
    // changing them to branch to NewBB instead.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (Blocks.count(key: PN->getIncomingBlock(i))) {
        Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
        TI->replaceUsesOfWith(From: OldPred, To: NewBB);
      }

    // Okay, everything within the region is now branching to the right block, we
    // just have to update the PHI nodes now, inserting PHI nodes into NewBB.
    BasicBlock::iterator AfterPHIs;
    for (AfterPHIs = OldPred->begin(); isa<PHINode>(Val: AfterPHIs); ++AfterPHIs) {
      PHINode *PN = cast<PHINode>(Val&: AfterPHIs);
      // Create a new PHI node in the new region, which has an incoming value
      // from OldPred of PN.
      PHINode *NewPN = PHINode::Create(Ty: PN->getType(), NumReservedValues: 1 + NumPredsFromRegion,
                                       NameStr: PN->getName() + ".ce");
      NewPN->insertBefore(InsertPos: NewBB->begin());
      PN->replaceAllUsesWith(V: NewPN);
      NewPN->addIncoming(V: PN, BB: OldPred);

      // Loop over all of the incoming value in PN, moving them to NewPN if they
      // are from the extracted region.
      for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
        if (Blocks.count(key: PN->getIncomingBlock(i))) {
          NewPN->addIncoming(V: PN->getIncomingValue(i), BB: PN->getIncomingBlock(i));
          PN->removeIncomingValue(Idx: i);
          // removeIncomingValue shifts later entries down; re-examine index i.
          --i;
        }
      }
    }
  }
}
742
/// severSplitPHINodesOfExits - if PHI nodes in exit blocks have inputs from
/// the outlined region, we split these PHIs in two: one with inputs from the
/// region and another with the remaining incoming blocks; the first PHIs are
/// then placed in the outlined region (in a new ".split" block).
void CodeExtractor::severSplitPHINodesOfExits(
    const SetVector<BasicBlock *> &Exits) {
  for (BasicBlock *ExitBB : Exits) {
    // Lazily created ".split" block that absorbs the in-region PHI parts.
    BasicBlock *NewBB = nullptr;

    for (PHINode &PN : ExitBB->phis()) {
      // Find all incoming values from the outlining region.
      SmallVector<unsigned, 2> IncomingVals;
      for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i)
        if (Blocks.count(key: PN.getIncomingBlock(i)))
          IncomingVals.push_back(Elt: i);

      // Do not process PHI if there is one (or fewer) predecessor from region.
      // If PHI has exactly one predecessor from region, only this one incoming
      // will be replaced on codeRepl block, so it should be safe to skip PHI.
      if (IncomingVals.size() <= 1)
        continue;

      // Create block for new PHIs and add it to the list of outlined if it
      // wasn't done before.
      if (!NewBB) {
        NewBB = BasicBlock::Create(Context&: ExitBB->getContext(),
                                   Name: ExitBB->getName() + ".split",
                                   Parent: ExitBB->getParent(), InsertBefore: ExitBB);
        NewBB->IsNewDbgInfoFormat = ExitBB->IsNewDbgInfoFormat;
        // Route every in-region predecessor of ExitBB through the new block.
        SmallVector<BasicBlock *, 4> Preds(predecessors(BB: ExitBB));
        for (BasicBlock *PredBB : Preds)
          if (Blocks.count(key: PredBB))
            PredBB->getTerminator()->replaceUsesOfWith(From: ExitBB, To: NewBB);
        BranchInst::Create(IfTrue: ExitBB, InsertAtEnd: NewBB);
        Blocks.insert(X: NewBB);
      }

      // Split this PHI: region incomings move to a new PHI in NewBB, and the
      // original PHI keeps the rest plus one entry for NewBB itself.
      PHINode *NewPN = PHINode::Create(Ty: PN.getType(), NumReservedValues: IncomingVals.size(),
                                       NameStr: PN.getName() + ".ce");
      NewPN->insertBefore(InsertPos: NewBB->getFirstNonPHIIt());
      for (unsigned i : IncomingVals)
        NewPN->addIncoming(V: PN.getIncomingValue(i), BB: PN.getIncomingBlock(i));
      // Remove in reverse order so earlier indices stay valid.
      for (unsigned i : reverse(C&: IncomingVals))
        PN.removeIncomingValue(Idx: i, DeletePHIIfEmpty: false);
      PN.addIncoming(V: NewPN, BB: NewBB);
    }
  }
}
792
793void CodeExtractor::splitReturnBlocks() {
794 for (BasicBlock *Block : Blocks)
795 if (ReturnInst *RI = dyn_cast<ReturnInst>(Val: Block->getTerminator())) {
796 BasicBlock *New =
797 Block->splitBasicBlock(I: RI->getIterator(), BBName: Block->getName() + ".ret");
798 if (DT) {
799 // Old dominates New. New node dominates all other nodes dominated
800 // by Old.
801 DomTreeNode *OldNode = DT->getNode(BB: Block);
802 SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
803 OldNode->end());
804
805 DomTreeNode *NewNode = DT->addNewBlock(BB: New, DomBB: Block);
806
807 for (DomTreeNode *I : Children)
808 DT->changeImmediateDominator(N: I, NewIDom: NewNode);
809 }
810 }
811}
812
813/// constructFunction - make a function based on inputs and outputs, as follows:
814/// f(in0, ..., inN, out0, ..., outN)
815Function *CodeExtractor::constructFunction(const ValueSet &inputs,
816 const ValueSet &outputs,
817 BasicBlock *header,
818 BasicBlock *newRootNode,
819 BasicBlock *newHeader,
820 Function *oldFunction,
821 Module *M) {
822 LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n");
823 LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n");
824
825 // This function returns unsigned, outputs will go back by reference.
826 switch (NumExitBlocks) {
827 case 0:
828 case 1: RetTy = Type::getVoidTy(C&: header->getContext()); break;
829 case 2: RetTy = Type::getInt1Ty(C&: header->getContext()); break;
830 default: RetTy = Type::getInt16Ty(C&: header->getContext()); break;
831 }
832
833 std::vector<Type *> ParamTy;
834 std::vector<Type *> AggParamTy;
835 ValueSet StructValues;
836 const DataLayout &DL = M->getDataLayout();
837
838 // Add the types of the input values to the function's argument list
839 for (Value *value : inputs) {
840 LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n");
841 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: value)) {
842 AggParamTy.push_back(x: value->getType());
843 StructValues.insert(X: value);
844 } else
845 ParamTy.push_back(x: value->getType());
846 }
847
848 // Add the types of the output values to the function's argument list.
849 for (Value *output : outputs) {
850 LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n");
851 if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: output)) {
852 AggParamTy.push_back(x: output->getType());
853 StructValues.insert(X: output);
854 } else
855 ParamTy.push_back(
856 x: PointerType::get(ElementType: output->getType(), AddressSpace: DL.getAllocaAddrSpace()));
857 }
858
859 assert(
860 (ParamTy.size() + AggParamTy.size()) ==
861 (inputs.size() + outputs.size()) &&
862 "Number of scalar and aggregate params does not match inputs, outputs");
863 assert((StructValues.empty() || AggregateArgs) &&
864 "Expeced StructValues only with AggregateArgs set");
865
866 // Concatenate scalar and aggregate params in ParamTy.
867 size_t NumScalarParams = ParamTy.size();
868 StructType *StructTy = nullptr;
869 if (AggregateArgs && !AggParamTy.empty()) {
870 StructTy = StructType::get(Context&: M->getContext(), Elements: AggParamTy);
871 ParamTy.push_back(x: PointerType::get(
872 ElementType: StructTy, AddressSpace: ArgsInZeroAddressSpace ? 0 : DL.getAllocaAddrSpace()));
873 }
874
875 LLVM_DEBUG({
876 dbgs() << "Function type: " << *RetTy << " f(";
877 for (Type *i : ParamTy)
878 dbgs() << *i << ", ";
879 dbgs() << ")\n";
880 });
881
882 FunctionType *funcType = FunctionType::get(
883 Result: RetTy, Params: ParamTy, isVarArg: AllowVarArgs && oldFunction->isVarArg());
884
885 std::string SuffixToUse =
886 Suffix.empty()
887 ? (header->getName().empty() ? "extracted" : header->getName().str())
888 : Suffix;
889 // Create the new function
890 Function *newFunction = Function::Create(
891 Ty: funcType, Linkage: GlobalValue::InternalLinkage, AddrSpace: oldFunction->getAddressSpace(),
892 N: oldFunction->getName() + "." + SuffixToUse, M);
893 newFunction->IsNewDbgInfoFormat = oldFunction->IsNewDbgInfoFormat;
894
895 // Inherit all of the target dependent attributes and white-listed
896 // target independent attributes.
897 // (e.g. If the extracted region contains a call to an x86.sse
898 // instruction we need to make sure that the extracted region has the
899 // "target-features" attribute allowing it to be lowered.
900 // FIXME: This should be changed to check to see if a specific
901 // attribute can not be inherited.
902 for (const auto &Attr : oldFunction->getAttributes().getFnAttrs()) {
903 if (Attr.isStringAttribute()) {
904 if (Attr.getKindAsString() == "thunk")
905 continue;
906 } else
907 switch (Attr.getKindAsEnum()) {
908 // Those attributes cannot be propagated safely. Explicitly list them
909 // here so we get a warning if new attributes are added.
910 case Attribute::AllocSize:
911 case Attribute::Builtin:
912 case Attribute::Convergent:
913 case Attribute::JumpTable:
914 case Attribute::Naked:
915 case Attribute::NoBuiltin:
916 case Attribute::NoMerge:
917 case Attribute::NoReturn:
918 case Attribute::NoSync:
919 case Attribute::ReturnsTwice:
920 case Attribute::Speculatable:
921 case Attribute::StackAlignment:
922 case Attribute::WillReturn:
923 case Attribute::AllocKind:
924 case Attribute::PresplitCoroutine:
925 case Attribute::Memory:
926 case Attribute::NoFPClass:
927 case Attribute::CoroDestroyOnlyWhenComplete:
928 continue;
929 // Those attributes should be safe to propagate to the extracted function.
930 case Attribute::AlwaysInline:
931 case Attribute::Cold:
932 case Attribute::DisableSanitizerInstrumentation:
933 case Attribute::FnRetThunkExtern:
934 case Attribute::Hot:
935 case Attribute::NoRecurse:
936 case Attribute::InlineHint:
937 case Attribute::MinSize:
938 case Attribute::NoCallback:
939 case Attribute::NoDuplicate:
940 case Attribute::NoFree:
941 case Attribute::NoImplicitFloat:
942 case Attribute::NoInline:
943 case Attribute::NonLazyBind:
944 case Attribute::NoRedZone:
945 case Attribute::NoUnwind:
946 case Attribute::NoSanitizeBounds:
947 case Attribute::NoSanitizeCoverage:
948 case Attribute::NullPointerIsValid:
949 case Attribute::OptimizeForDebugging:
950 case Attribute::OptForFuzzing:
951 case Attribute::OptimizeNone:
952 case Attribute::OptimizeForSize:
953 case Attribute::SafeStack:
954 case Attribute::ShadowCallStack:
955 case Attribute::SanitizeAddress:
956 case Attribute::SanitizeMemory:
957 case Attribute::SanitizeThread:
958 case Attribute::SanitizeHWAddress:
959 case Attribute::SanitizeMemTag:
960 case Attribute::SpeculativeLoadHardening:
961 case Attribute::StackProtect:
962 case Attribute::StackProtectReq:
963 case Attribute::StackProtectStrong:
964 case Attribute::StrictFP:
965 case Attribute::UWTable:
966 case Attribute::VScaleRange:
967 case Attribute::NoCfCheck:
968 case Attribute::MustProgress:
969 case Attribute::NoProfile:
970 case Attribute::SkipProfile:
971 break;
972 // These attributes cannot be applied to functions.
973 case Attribute::Alignment:
974 case Attribute::AllocatedPointer:
975 case Attribute::AllocAlign:
976 case Attribute::ByVal:
977 case Attribute::Dereferenceable:
978 case Attribute::DereferenceableOrNull:
979 case Attribute::ElementType:
980 case Attribute::InAlloca:
981 case Attribute::InReg:
982 case Attribute::Nest:
983 case Attribute::NoAlias:
984 case Attribute::NoCapture:
985 case Attribute::NoUndef:
986 case Attribute::NonNull:
987 case Attribute::Preallocated:
988 case Attribute::ReadNone:
989 case Attribute::ReadOnly:
990 case Attribute::Returned:
991 case Attribute::SExt:
992 case Attribute::StructRet:
993 case Attribute::SwiftError:
994 case Attribute::SwiftSelf:
995 case Attribute::SwiftAsync:
996 case Attribute::ZExt:
997 case Attribute::ImmArg:
998 case Attribute::ByRef:
999 case Attribute::WriteOnly:
1000 case Attribute::Writable:
1001 case Attribute::DeadOnUnwind:
1002 case Attribute::Range:
1003 // These are not really attributes.
1004 case Attribute::None:
1005 case Attribute::EndAttrKinds:
1006 case Attribute::EmptyKey:
1007 case Attribute::TombstoneKey:
1008 llvm_unreachable("Not a function attribute");
1009 }
1010
1011 newFunction->addFnAttr(Attr);
1012 }
1013
1014 if (NumExitBlocks == 0) {
1015 // Mark the new function `noreturn` if applicable. Terminators which resume
1016 // exception propagation are treated as returning instructions. This is to
1017 // avoid inserting traps after calls to outlined functions which unwind.
1018 if (none_of(Range&: Blocks, P: [](const BasicBlock *BB) {
1019 const Instruction *Term = BB->getTerminator();
1020 return isa<ReturnInst>(Val: Term) || isa<ResumeInst>(Val: Term);
1021 }))
1022 newFunction->setDoesNotReturn();
1023 }
1024
1025 newFunction->insert(Position: newFunction->end(), BB: newRootNode);
1026
1027 // Create scalar and aggregate iterators to name all of the arguments we
1028 // inserted.
1029 Function::arg_iterator ScalarAI = newFunction->arg_begin();
1030 Function::arg_iterator AggAI = std::next(x: ScalarAI, n: NumScalarParams);
1031
1032 // Rewrite all users of the inputs in the extracted region to use the
1033 // arguments (or appropriate addressing into struct) instead.
1034 for (unsigned i = 0, e = inputs.size(), aggIdx = 0; i != e; ++i) {
1035 Value *RewriteVal;
1036 if (AggregateArgs && StructValues.contains(key: inputs[i])) {
1037 Value *Idx[2];
1038 Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: header->getContext()));
1039 Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: header->getContext()), V: aggIdx);
1040 BasicBlock::iterator TI = newFunction->begin()->getTerminator()->getIterator();
1041 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1042 PointeeType: StructTy, Ptr: &*AggAI, IdxList: Idx, NameStr: "gep_" + inputs[i]->getName(), InsertBefore: TI);
1043 RewriteVal = new LoadInst(StructTy->getElementType(N: aggIdx), GEP,
1044 "loadgep_" + inputs[i]->getName(), TI);
1045 ++aggIdx;
1046 } else
1047 RewriteVal = &*ScalarAI++;
1048
1049 std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
1050 for (User *use : Users)
1051 if (Instruction *inst = dyn_cast<Instruction>(Val: use))
1052 if (Blocks.count(key: inst->getParent()))
1053 inst->replaceUsesOfWith(From: inputs[i], To: RewriteVal);
1054 }
1055
1056 // Set names for input and output arguments.
1057 if (NumScalarParams) {
1058 ScalarAI = newFunction->arg_begin();
1059 for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++ScalarAI)
1060 if (!StructValues.contains(key: inputs[i]))
1061 ScalarAI->setName(inputs[i]->getName());
1062 for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++ScalarAI)
1063 if (!StructValues.contains(key: outputs[i]))
1064 ScalarAI->setName(outputs[i]->getName() + ".out");
1065 }
1066
1067 // Rewrite branches to basic blocks outside of the loop to new dummy blocks
1068 // within the new function. This must be done before we lose track of which
1069 // blocks were originally in the code region.
1070 std::vector<User *> Users(header->user_begin(), header->user_end());
1071 for (auto &U : Users)
1072 // The BasicBlock which contains the branch is not in the region
1073 // modify the branch target to a new block
1074 if (Instruction *I = dyn_cast<Instruction>(Val: U))
1075 if (I->isTerminator() && I->getFunction() == oldFunction &&
1076 !Blocks.count(key: I->getParent()))
1077 I->replaceUsesOfWith(From: header, To: newHeader);
1078
1079 return newFunction;
1080}
1081
1082/// Erase lifetime.start markers which reference inputs to the extraction
1083/// region, and insert the referenced memory into \p LifetimesStart.
1084///
1085/// The extraction region is defined by a set of blocks (\p Blocks), and a set
1086/// of allocas which will be moved from the caller function into the extracted
1087/// function (\p SunkAllocas).
1088static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
1089 const SetVector<Value *> &SunkAllocas,
1090 SetVector<Value *> &LifetimesStart) {
1091 for (BasicBlock *BB : Blocks) {
1092 for (Instruction &I : llvm::make_early_inc_range(Range&: *BB)) {
1093 auto *II = dyn_cast<IntrinsicInst>(Val: &I);
1094 if (!II || !II->isLifetimeStartOrEnd())
1095 continue;
1096
1097 // Get the memory operand of the lifetime marker. If the underlying
1098 // object is a sunk alloca, or is otherwise defined in the extraction
1099 // region, the lifetime marker must not be erased.
1100 Value *Mem = II->getOperand(i_nocapture: 1)->stripInBoundsOffsets();
1101 if (SunkAllocas.count(key: Mem) || definedInRegion(Blocks, V: Mem))
1102 continue;
1103
1104 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1105 LifetimesStart.insert(X: Mem);
1106 II->eraseFromParent();
1107 }
1108 }
1109}
1110
1111/// Insert lifetime start/end markers surrounding the call to the new function
1112/// for objects defined in the caller.
1113static void insertLifetimeMarkersSurroundingCall(
1114 Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
1115 CallInst *TheCall) {
1116 LLVMContext &Ctx = M->getContext();
1117 auto NegativeOne = ConstantInt::getSigned(Ty: Type::getInt64Ty(C&: Ctx), V: -1);
1118 Instruction *Term = TheCall->getParent()->getTerminator();
1119
1120 // Emit lifetime markers for the pointers given in \p Objects. Insert the
1121 // markers before the call if \p InsertBefore, and after the call otherwise.
1122 auto insertMarkers = [&](Intrinsic::ID MarkerFunc, ArrayRef<Value *> Objects,
1123 bool InsertBefore) {
1124 for (Value *Mem : Objects) {
1125 assert((!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() ==
1126 TheCall->getFunction()) &&
1127 "Input memory not defined in original function");
1128
1129 Function *Func = Intrinsic::getDeclaration(M, id: MarkerFunc, Tys: Mem->getType());
1130 auto Marker = CallInst::Create(Func, Args: {NegativeOne, Mem});
1131 if (InsertBefore)
1132 Marker->insertBefore(InsertPos: TheCall);
1133 else
1134 Marker->insertBefore(InsertPos: Term);
1135 }
1136 };
1137
1138 if (!LifetimesStart.empty()) {
1139 insertMarkers(Intrinsic::lifetime_start, LifetimesStart,
1140 /*InsertBefore=*/true);
1141 }
1142
1143 if (!LifetimesEnd.empty()) {
1144 insertMarkers(Intrinsic::lifetime_end, LifetimesEnd,
1145 /*InsertBefore=*/false);
1146 }
1147}
1148
/// emitCallAndSwitchStatement - This method sets up the caller side by adding
/// the call instruction, splitting any PHI nodes in the header block as
/// necessary.
///
/// The caller-side sequence emitted into \p codeReplacer is:
///   1. allocas for scalar outputs (and the aggregate struct, if used);
///   2. stores of aggregated inputs into the struct;
///   3. the call to \p newFunction;
///   4. reloads of the output values;
///   5. a switch on the call's return value dispatching to the region's
///      original exit blocks (simplified to a branch/ret when possible).
/// Returns the emitted call instruction.
CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
                                                    BasicBlock *codeReplacer,
                                                    ValueSet &inputs,
                                                    ValueSet &outputs) {
  // Emit a call to the new function, passing in: *pointer to struct (if
  // aggregating parameters), or plain inputs and allocated memory for outputs
  std::vector<Value *> params, ReloadOutputs, Reloads;
  ValueSet StructValues;

  Module *M = newFunction->getParent();
  LLVMContext &Context = M->getContext();
  const DataLayout &DL = M->getDataLayout();
  CallInst *call = nullptr;

  // Add inputs as params, or to be filled into the struct
  unsigned ScalarInputArgNo = 0;
  SmallVector<unsigned, 1> SwiftErrorArgs;
  for (Value *input : inputs) {
    if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: input))
      StructValues.insert(X: input);
    else {
      params.push_back(x: input);
      // swifterror values need the matching parameter attribute on both the
      // call and the new function; remember the argument position.
      if (input->isSwiftError())
        SwiftErrorArgs.push_back(Elt: ScalarInputArgNo);
    }
    ++ScalarInputArgNo;
  }

  // Create allocas for the outputs
  unsigned ScalarOutputArgNo = 0;
  for (Value *output : outputs) {
    if (AggregateArgs && !ExcludeArgsFromAggregate.contains(key: output)) {
      StructValues.insert(X: output);
    } else {
      // Each scalar output is returned through caller-allocated memory,
      // created in the entry block of the caller function.
      AllocaInst *alloca =
        new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
                       nullptr, output->getName() + ".loc",
                       codeReplacer->getParent()->front().begin());
      ReloadOutputs.push_back(x: alloca);
      params.push_back(x: alloca);
      ++ScalarOutputArgNo;
    }
  }

  StructType *StructArgTy = nullptr;
  AllocaInst *Struct = nullptr;
  unsigned NumAggregatedInputs = 0;
  if (AggregateArgs && !StructValues.empty()) {
    std::vector<Type *> ArgTypes;
    for (Value *V : StructValues)
      ArgTypes.push_back(x: V->getType());

    // Allocate a struct at the beginning of this function
    StructArgTy = StructType::get(Context&: newFunction->getContext(), Elements: ArgTypes);
    Struct = new AllocaInst(
        StructArgTy, DL.getAllocaAddrSpace(), nullptr, "structArg",
        AllocationBlock ? AllocationBlock->getFirstInsertionPt()
                        : codeReplacer->getParent()->front().begin());

    if (ArgsInZeroAddressSpace && DL.getAllocaAddrSpace() != 0) {
      // The callee expects the struct pointer in address space 0; cast the
      // alloca (which lives in the alloca address space) accordingly.
      auto *StructSpaceCast = new AddrSpaceCastInst(
          Struct, PointerType ::get(C&: Context, AddressSpace: 0), "structArg.ascast");
      StructSpaceCast->insertAfter(InsertPos: Struct);
      params.push_back(x: StructSpaceCast);
    } else {
      params.push_back(x: Struct);
    }
    // Store aggregated inputs in the struct.
    for (unsigned i = 0, e = StructValues.size(); i != e; ++i) {
      if (inputs.contains(key: StructValues[i])) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
        Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: i);
        GetElementPtrInst *GEP = GetElementPtrInst::Create(
            PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_" + StructValues[i]->getName());
        GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
        new StoreInst(StructValues[i], GEP, codeReplacer);
        NumAggregatedInputs++;
      }
    }
  }

  // Emit the call to the function
  call = CallInst::Create(Func: newFunction, Args: params,
                          NameStr: NumExitBlocks > 1 ? "targetBlock" : "");
  // Add debug location to the new call, if the original function has debug
  // info. In that case, the terminator of the entry block of the extracted
  // function contains the first debug location of the extracted function,
  // set in extractCodeRegion.
  if (codeReplacer->getParent()->getSubprogram()) {
    if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
      call->setDebugLoc(DL);
  }
  call->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());

  // Set swifterror parameter attributes.
  for (unsigned SwiftErrArgNo : SwiftErrorArgs) {
    call->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
    newFunction->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
  }

  // Reload the outputs passed in by reference, use the struct if output is in
  // the aggregate or reload from the scalar argument.
  for (unsigned i = 0, e = outputs.size(), scalarIdx = 0,
                aggIdx = NumAggregatedInputs;
       i != e; ++i) {
    Value *Output = nullptr;
    if (AggregateArgs && StructValues.contains(key: outputs[i])) {
      // Aggregated outputs follow the aggregated inputs in the struct, so
      // aggIdx starts at NumAggregatedInputs.
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
      Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: aggIdx);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          PointeeType: StructArgTy, Ptr: Struct, IdxList: Idx, NameStr: "gep_reload_" + outputs[i]->getName());
      GEP->insertInto(ParentBB: codeReplacer, It: codeReplacer->end());
      Output = GEP;
      ++aggIdx;
    } else {
      Output = ReloadOutputs[scalarIdx];
      ++scalarIdx;
    }
    LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
                                  outputs[i]->getName() + ".reload",
                                  codeReplacer);
    Reloads.push_back(x: load);
    // Redirect users of the output that live outside the extracted region to
    // the reloaded value.
    std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
    for (User *U : Users) {
      Instruction *inst = cast<Instruction>(Val: U);
      if (!Blocks.count(key: inst->getParent()))
        inst->replaceUsesOfWith(From: outputs[i], To: load);
    }
  }

  // Now we can emit a switch statement using the call as a value.
  SwitchInst *TheSwitch =
      SwitchInst::Create(Value: Constant::getNullValue(Ty: Type::getInt16Ty(C&: Context)),
                         Default: codeReplacer, NumCases: 0, InsertAtEnd: codeReplacer);

  // Since there may be multiple exits from the original region, make the new
  // function return an unsigned, switch on that number. This loop iterates
  // over all of the blocks in the extracted region, updating any terminator
  // instructions in the to-be-extracted region that branch to blocks that are
  // not in the region to be extracted.
  std::map<BasicBlock *, BasicBlock *> ExitBlockMap;

  // Iterate over the previously collected targets, and create new blocks inside
  // the function to branch to.
  unsigned switchVal = 0;
  for (BasicBlock *OldTarget : OldTargets) {
    if (Blocks.count(key: OldTarget))
      continue;
    BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
    if (NewTarget)
      continue;

    // If we don't already have an exit stub for this non-extracted
    // destination, create one now!
    NewTarget = BasicBlock::Create(Context,
                                   Name: OldTarget->getName() + ".exitStub",
                                   Parent: newFunction);
    unsigned SuccNum = switchVal++;

    // The exit stub returns a value identifying which exit was taken; its
    // type depends on how many exits the region has.
    Value *brVal = nullptr;
    assert(NumExitBlocks < 0xffff && "too many exit blocks for switch");
    switch (NumExitBlocks) {
    case 0:
    case 1: break; // No value needed.
    case 2: // Conditional branch, return a bool
      brVal = ConstantInt::get(Ty: Type::getInt1Ty(C&: Context), V: !SuccNum);
      break;
    default:
      brVal = ConstantInt::get(Ty: Type::getInt16Ty(C&: Context), V: SuccNum);
      break;
    }

    ReturnInst::Create(C&: Context, retVal: brVal, InsertAtEnd: NewTarget);

    // Update the switch instruction.
    TheSwitch->addCase(OnVal: ConstantInt::get(Ty: Type::getInt16Ty(C&: Context),
                                        V: SuccNum),
                       Dest: OldTarget);
  }

  // Retarget every region-exiting edge to the corresponding exit stub.
  for (BasicBlock *Block : Blocks) {
    Instruction *TI = Block->getTerminator();
    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
      if (Blocks.count(key: TI->getSuccessor(Idx: i)))
        continue;
      BasicBlock *OldTarget = TI->getSuccessor(Idx: i);
      // add a new basic block which returns the appropriate value
      BasicBlock *NewTarget = ExitBlockMap[OldTarget];
      assert(NewTarget && "Unknown target block!");

      // rewrite the original branch instruction with this new target
      TI->setSuccessor(Idx: i, BB: NewTarget);
    }
  }

  // Store the arguments right after the definition of output value.
  // This must happen after the exit stubs above are created, to ensure that
  // the store of an invoke's result is placed inside the outlined function
  // (in the invoke's normal destination, which now exists there).
  Function::arg_iterator ScalarOutputArgBegin = newFunction->arg_begin();
  std::advance(i&: ScalarOutputArgBegin, n: ScalarInputArgNo);
  Function::arg_iterator AggOutputArgBegin = newFunction->arg_begin();
  std::advance(i&: AggOutputArgBegin, n: ScalarInputArgNo + ScalarOutputArgNo);

  for (unsigned i = 0, e = outputs.size(), aggIdx = NumAggregatedInputs; i != e;
       ++i) {
    auto *OutI = dyn_cast<Instruction>(Val: outputs[i]);
    if (!OutI)
      continue;

    // Find proper insertion point.
    BasicBlock::iterator InsertPt;
    // In case OutI is an invoke, we insert the store at the beginning in the
    // 'normal destination' BB. Otherwise we insert the store right after OutI.
    if (auto *InvokeI = dyn_cast<InvokeInst>(Val: OutI))
      InsertPt = InvokeI->getNormalDest()->getFirstInsertionPt();
    else if (auto *Phi = dyn_cast<PHINode>(Val: OutI))
      InsertPt = Phi->getParent()->getFirstInsertionPt();
    else
      InsertPt = std::next(x: OutI->getIterator());

    assert((InsertPt->getFunction() == newFunction ||
            Blocks.count(InsertPt->getParent())) &&
           "InsertPt should be in new function");
    if (AggregateArgs && StructValues.contains(key: outputs[i])) {
      assert(AggOutputArgBegin != newFunction->arg_end() &&
             "Number of aggregate output arguments should match "
             "the number of defined values");
      Value *Idx[2];
      Idx[0] = Constant::getNullValue(Ty: Type::getInt32Ty(C&: Context));
      Idx[1] = ConstantInt::get(Ty: Type::getInt32Ty(C&: Context), V: aggIdx);
      GetElementPtrInst *GEP = GetElementPtrInst::Create(
          PointeeType: StructArgTy, Ptr: &*AggOutputArgBegin, IdxList: Idx, NameStr: "gep_" + outputs[i]->getName(),
          InsertBefore: InsertPt);
      new StoreInst(outputs[i], GEP, InsertPt);
      ++aggIdx;
      // Since there should be only one struct argument aggregating
      // all the output values, we shouldn't increment AggOutputArgBegin, which
      // always points to the struct argument, in this case.
    } else {
      assert(ScalarOutputArgBegin != newFunction->arg_end() &&
             "Number of scalar output arguments should match "
             "the number of defined values");
      new StoreInst(outputs[i], &*ScalarOutputArgBegin, InsertPt);
      ++ScalarOutputArgBegin;
    }
  }

  // Now that we've done the deed, simplify the switch instruction.
  Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
  switch (NumExitBlocks) {
  case 0:
    // There are no successors (the block containing the switch itself), which
    // means that previously this was the last part of the function, and hence
    // this should be rewritten as a `ret` or `unreachable`.
    if (newFunction->doesNotReturn()) {
      // If fn is no return, end with an unreachable terminator.
      (void)new UnreachableInst(Context, TheSwitch->getIterator());
    } else if (OldFnRetTy->isVoidTy()) {
      // We have no return value.
      ReturnInst::Create(C&: Context, retVal: nullptr,
                         InsertBefore: TheSwitch->getIterator()); // Return void
    } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
      // return what we have
      ReturnInst::Create(C&: Context, retVal: TheSwitch->getCondition(),
                         InsertBefore: TheSwitch->getIterator());
    } else {
      // Otherwise we must have code extracted an unwind or something, just
      // return whatever we want.
      ReturnInst::Create(C&: Context, retVal: Constant::getNullValue(Ty: OldFnRetTy),
                         InsertBefore: TheSwitch->getIterator());
    }

    TheSwitch->eraseFromParent();
    break;
  case 1:
    // Only a single destination, change the switch into an unconditional
    // branch.
    BranchInst::Create(IfTrue: TheSwitch->getSuccessor(idx: 1), InsertBefore: TheSwitch->getIterator());
    TheSwitch->eraseFromParent();
    break;
  case 2:
    // Two destinations: conditional branch on the call's boolean result.
    BranchInst::Create(IfTrue: TheSwitch->getSuccessor(idx: 1), IfFalse: TheSwitch->getSuccessor(idx: 2),
                       Cond: call, InsertBefore: TheSwitch->getIterator());
    TheSwitch->eraseFromParent();
    break;
  default:
    // Otherwise, make the default destination of the switch instruction be one
    // of the other successors.
    TheSwitch->setCondition(call);
    TheSwitch->setDefaultDest(TheSwitch->getSuccessor(idx: NumExitBlocks));
    // Remove redundant case
    TheSwitch->removeCase(I: SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
    break;
  }

  // Insert lifetime markers around the reloads of any output values. The
  // allocas output values are stored in are only in-use in the codeRepl block.
  insertLifetimeMarkersSurroundingCall(M, LifetimesStart: ReloadOutputs, LifetimesEnd: ReloadOutputs, TheCall: call);

  return call;
}
1455
1456void CodeExtractor::moveCodeToFunction(Function *newFunction) {
1457 auto newFuncIt = newFunction->front().getIterator();
1458 for (BasicBlock *Block : Blocks) {
1459 // Delete the basic block from the old function, and the list of blocks
1460 Block->removeFromParent();
1461
1462 // Insert this basic block into the new function
1463 // Insert the original blocks after the entry block created
1464 // for the new function. The entry block may be followed
1465 // by a set of exit blocks at this point, but these exit
1466 // blocks better be placed at the end of the new function.
1467 newFuncIt = newFunction->insert(Position: std::next(x: newFuncIt), BB: Block);
1468 }
1469}
1470
// Recompute branch probabilities (and !prof branch-weight metadata) for the
// terminator of \p CodeReplacer, which dispatches to the extracted region's
// exit blocks, using the original exit-block frequencies in \p ExitWeights.
void CodeExtractor::calculateNewCallTerminatorWeights(
    BasicBlock *CodeReplacer,
    DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
    BranchProbabilityInfo *BPI) {
  using Distribution = BlockFrequencyInfoImplBase::Distribution;
  using BlockNode = BlockFrequencyInfoImplBase::BlockNode;

  // Update the branch weights for the exit block.
  Instruction *TI = CodeReplacer->getTerminator();
  SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);

  // Block Frequency distribution with dummy node.
  Distribution BranchDist;

  SmallVector<BranchProbability, 4> EdgeProbabilities(
      TI->getNumSuccessors(), BranchProbability::getUnknown());

  // Add each of the frequencies of the successors.
  for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
    // Successor index i doubles as the distribution's node id.
    BlockNode ExitNode(i);
    uint64_t ExitFreq = ExitWeights[TI->getSuccessor(Idx: i)].getFrequency();
    if (ExitFreq != 0)
      BranchDist.addExit(Node: ExitNode, Amount: ExitFreq);
    else
      // Zero-frequency exits get an explicit zero probability.
      EdgeProbabilities[i] = BranchProbability::getZero();
  }

  // Check for no total weight.
  if (BranchDist.Total == 0) {
    // Every exit was zero-frequency: record the edge probabilities but leave
    // the terminator's !prof metadata untouched.
    BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
    return;
  }

  // Normalize the distribution so that they can fit in unsigned.
  BranchDist.normalize();

  // Create normalized branch weights and set the metadata.
  for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
    const auto &Weight = BranchDist.Weights[I];

    // Get the weight and update the current BFI.
    BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
    BranchProbability BP(Weight.Amount, BranchDist.Total);
    EdgeProbabilities[Weight.TargetNode.Index] = BP;
  }
  BPI->setEdgeProbability(Src: CodeReplacer, Probs: EdgeProbabilities);
  TI->setMetadata(
      KindID: LLVMContext::MD_prof,
      Node: MDBuilder(TI->getContext()).createBranchWeights(Weights: BranchWeights));
}
1521
1522/// Erase debug info intrinsics which refer to values in \p F but aren't in
1523/// \p F.
1524static void eraseDebugIntrinsicsWithNonLocalRefs(Function &F) {
1525 for (Instruction &I : instructions(F)) {
1526 SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
1527 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords;
1528 findDbgUsers(DbgInsts&: DbgUsers, V: &I, DbgVariableRecords: &DbgVariableRecords);
1529 for (DbgVariableIntrinsic *DVI : DbgUsers)
1530 if (DVI->getFunction() != &F)
1531 DVI->eraseFromParent();
1532 for (DbgVariableRecord *DVR : DbgVariableRecords)
1533 if (DVR->getFunction() != &F)
1534 DVR->eraseFromParent();
1535 }
1536}
1537
1538/// Fix up the debug info in the old and new functions by pointing line
1539/// locations and debug intrinsics to the new subprogram scope, and by deleting
1540/// intrinsics which point to values outside of the new function.
1541static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
1542 CallInst &TheCall) {
1543 DISubprogram *OldSP = OldFunc.getSubprogram();
1544 LLVMContext &Ctx = OldFunc.getContext();
1545
1546 if (!OldSP) {
1547 // Erase any debug info the new function contains.
1548 stripDebugInfo(F&: NewFunc);
1549 // Make sure the old function doesn't contain any non-local metadata refs.
1550 eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
1551 return;
1552 }
1553
1554 // Create a subprogram for the new function. Leave out a description of the
1555 // function arguments, as the parameters don't correspond to anything at the
1556 // source level.
1557 assert(OldSP->getUnit() && "Missing compile unit for subprogram");
1558 DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
1559 OldSP->getUnit());
1560 auto SPType =
1561 DIB.createSubroutineType(ParameterTypes: DIB.getOrCreateTypeArray(Elements: std::nullopt));
1562 DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
1563 DISubprogram::SPFlagOptimized |
1564 DISubprogram::SPFlagLocalToUnit;
1565 auto NewSP = DIB.createFunction(
1566 Scope: OldSP->getUnit(), Name: NewFunc.getName(), LinkageName: NewFunc.getName(), File: OldSP->getFile(),
1567 /*LineNo=*/0, Ty: SPType, /*ScopeLine=*/0, Flags: DINode::FlagZero, SPFlags);
1568 NewFunc.setSubprogram(NewSP);
1569
1570 auto IsInvalidLocation = [&NewFunc](Value *Location) {
1571 // Location is invalid if it isn't a constant or an instruction, or is an
1572 // instruction but isn't in the new function.
1573 if (!Location ||
1574 (!isa<Constant>(Val: Location) && !isa<Instruction>(Val: Location)))
1575 return true;
1576 Instruction *LocationInst = dyn_cast<Instruction>(Val: Location);
1577 return LocationInst && LocationInst->getFunction() != &NewFunc;
1578 };
1579
1580 // Debug intrinsics in the new function need to be updated in one of two
1581 // ways:
1582 // 1) They need to be deleted, because they describe a value in the old
1583 // function.
1584 // 2) They need to point to fresh metadata, e.g. because they currently
1585 // point to a variable in the wrong scope.
1586 SmallDenseMap<DINode *, DINode *> RemappedMetadata;
1587 SmallVector<Instruction *, 4> DebugIntrinsicsToDelete;
1588 SmallVector<DbgVariableRecord *, 4> DVRsToDelete;
1589 DenseMap<const MDNode *, MDNode *> Cache;
1590
1591 auto GetUpdatedDIVariable = [&](DILocalVariable *OldVar) {
1592 DINode *&NewVar = RemappedMetadata[OldVar];
1593 if (!NewVar) {
1594 DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
1595 RootScope&: *OldVar->getScope(), NewSP&: *NewSP, Ctx, Cache);
1596 NewVar = DIB.createAutoVariable(
1597 Scope: NewScope, Name: OldVar->getName(), File: OldVar->getFile(), LineNo: OldVar->getLine(),
1598 Ty: OldVar->getType(), /*AlwaysPreserve=*/false, Flags: DINode::FlagZero,
1599 AlignInBits: OldVar->getAlignInBits());
1600 }
1601 return cast<DILocalVariable>(Val: NewVar);
1602 };
1603
1604 auto UpdateDbgLabel = [&](auto *LabelRecord) {
1605 // Point the label record to a fresh label within the new function if
1606 // the record was not inlined from some other function.
1607 if (LabelRecord->getDebugLoc().getInlinedAt())
1608 return;
1609 DILabel *OldLabel = LabelRecord->getLabel();
1610 DINode *&NewLabel = RemappedMetadata[OldLabel];
1611 if (!NewLabel) {
1612 DILocalScope *NewScope = DILocalScope::cloneScopeForSubprogram(
1613 RootScope&: *OldLabel->getScope(), NewSP&: *NewSP, Ctx, Cache);
1614 NewLabel = DILabel::get(Context&: Ctx, Scope: NewScope, Name: OldLabel->getName(),
1615 File: OldLabel->getFile(), Line: OldLabel->getLine());
1616 }
1617 LabelRecord->setLabel(cast<DILabel>(Val: NewLabel));
1618 };
1619
1620 auto UpdateDbgRecordsOnInst = [&](Instruction &I) -> void {
1621 for (DbgRecord &DR : I.getDbgRecordRange()) {
1622 if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(Val: &DR)) {
1623 UpdateDbgLabel(DLR);
1624 continue;
1625 }
1626
1627 DbgVariableRecord &DVR = cast<DbgVariableRecord>(Val&: DR);
1628 // Apply the two updates that dbg.values get: invalid operands, and
1629 // variable metadata fixup.
1630 if (any_of(Range: DVR.location_ops(), P: IsInvalidLocation)) {
1631 DVRsToDelete.push_back(Elt: &DVR);
1632 continue;
1633 }
1634 if (DVR.isDbgAssign() && IsInvalidLocation(DVR.getAddress())) {
1635 DVRsToDelete.push_back(Elt: &DVR);
1636 continue;
1637 }
1638 if (!DVR.getDebugLoc().getInlinedAt())
1639 DVR.setVariable(GetUpdatedDIVariable(DVR.getVariable()));
1640 }
1641 };
1642
1643 for (Instruction &I : instructions(F&: NewFunc)) {
1644 UpdateDbgRecordsOnInst(I);
1645
1646 auto *DII = dyn_cast<DbgInfoIntrinsic>(Val: &I);
1647 if (!DII)
1648 continue;
1649
1650 // Point the intrinsic to a fresh label within the new function if the
1651 // intrinsic was not inlined from some other function.
1652 if (auto *DLI = dyn_cast<DbgLabelInst>(Val: &I)) {
1653 UpdateDbgLabel(DLI);
1654 continue;
1655 }
1656
1657 auto *DVI = cast<DbgVariableIntrinsic>(Val: DII);
1658 // If any of the used locations are invalid, delete the intrinsic.
1659 if (any_of(Range: DVI->location_ops(), P: IsInvalidLocation)) {
1660 DebugIntrinsicsToDelete.push_back(Elt: DVI);
1661 continue;
1662 }
1663 // DbgAssign intrinsics have an extra Value argument:
1664 if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(Val: DVI);
1665 DAI && IsInvalidLocation(DAI->getAddress())) {
1666 DebugIntrinsicsToDelete.push_back(Elt: DVI);
1667 continue;
1668 }
1669 // If the variable was in the scope of the old function, i.e. it was not
1670 // inlined, point the intrinsic to a fresh variable within the new function.
1671 if (!DVI->getDebugLoc().getInlinedAt())
1672 DVI->setVariable(GetUpdatedDIVariable(DVI->getVariable()));
1673 }
1674
1675 for (auto *DII : DebugIntrinsicsToDelete)
1676 DII->eraseFromParent();
1677 for (auto *DVR : DVRsToDelete)
1678 DVR->getMarker()->MarkedInstr->dropOneDbgRecord(I: DVR);
1679 DIB.finalizeSubprogram(SP: NewSP);
1680
1681 // Fix up the scope information attached to the line locations in the new
1682 // function.
1683 for (Instruction &I : instructions(F&: NewFunc)) {
1684 if (const DebugLoc &DL = I.getDebugLoc())
1685 I.setDebugLoc(
1686 DebugLoc::replaceInlinedAtSubprogram(DL, NewSP&: *NewSP, Ctx, Cache));
1687 for (DbgRecord &DR : I.getDbgRecordRange())
1688 DR.setDebugLoc(DebugLoc::replaceInlinedAtSubprogram(DL: DR.getDebugLoc(),
1689 NewSP&: *NewSP, Ctx, Cache));
1690
1691 // Loop info metadata may contain line locations. Fix them up.
1692 auto updateLoopInfoLoc = [&Ctx, &Cache, NewSP](Metadata *MD) -> Metadata * {
1693 if (auto *Loc = dyn_cast_or_null<DILocation>(Val: MD))
1694 return DebugLoc::replaceInlinedAtSubprogram(DL: Loc, NewSP&: *NewSP, Ctx, Cache);
1695 return MD;
1696 };
1697 updateLoopMetadataDebugLocations(I, Updater: updateLoopInfoLoc);
1698 }
1699 if (!TheCall.getDebugLoc())
1700 TheCall.setDebugLoc(DILocation::get(Context&: Ctx, Line: 0, Column: 0, Scope: OldSP));
1701
1702 eraseDebugIntrinsicsWithNonLocalRefs(F&: NewFunc);
1703}
1704
1705Function *
1706CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC) {
1707 ValueSet Inputs, Outputs;
1708 return extractCodeRegion(CEAC, Inputs, Outputs);
1709}
1710
1711Function *
1712CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC,
1713 ValueSet &inputs, ValueSet &outputs) {
1714 if (!isEligible())
1715 return nullptr;
1716
1717 // Assumption: this is a single-entry code region, and the header is the first
1718 // block in the region.
1719 BasicBlock *header = *Blocks.begin();
1720 Function *oldFunction = header->getParent();
1721
1722 // Calculate the entry frequency of the new function before we change the root
1723 // block.
1724 BlockFrequency EntryFreq;
1725 if (BFI) {
1726 assert(BPI && "Both BPI and BFI are required to preserve profile info");
1727 for (BasicBlock *Pred : predecessors(BB: header)) {
1728 if (Blocks.count(key: Pred))
1729 continue;
1730 EntryFreq +=
1731 BFI->getBlockFreq(BB: Pred) * BPI->getEdgeProbability(Src: Pred, Dst: header);
1732 }
1733 }
1734
1735 // Remove @llvm.assume calls that will be moved to the new function from the
1736 // old function's assumption cache.
1737 for (BasicBlock *Block : Blocks) {
1738 for (Instruction &I : llvm::make_early_inc_range(Range&: *Block)) {
1739 if (auto *AI = dyn_cast<AssumeInst>(Val: &I)) {
1740 if (AC)
1741 AC->unregisterAssumption(CI: AI);
1742 AI->eraseFromParent();
1743 }
1744 }
1745 }
1746
1747 // If we have any return instructions in the region, split those blocks so
1748 // that the return is not in the region.
1749 splitReturnBlocks();
1750
1751 // Calculate the exit blocks for the extracted region and the total exit
1752 // weights for each of those blocks.
1753 DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
1754 SetVector<BasicBlock *> ExitBlocks;
1755 for (BasicBlock *Block : Blocks) {
1756 for (BasicBlock *Succ : successors(BB: Block)) {
1757 if (!Blocks.count(key: Succ)) {
1758 // Update the branch weight for this successor.
1759 if (BFI) {
1760 BlockFrequency &BF = ExitWeights[Succ];
1761 BF += BFI->getBlockFreq(BB: Block) * BPI->getEdgeProbability(Src: Block, Dst: Succ);
1762 }
1763 ExitBlocks.insert(X: Succ);
1764 }
1765 }
1766 }
1767 NumExitBlocks = ExitBlocks.size();
1768
1769 for (BasicBlock *Block : Blocks) {
1770 for (BasicBlock *OldTarget : successors(BB: Block))
1771 if (!Blocks.contains(key: OldTarget))
1772 OldTargets.push_back(Elt: OldTarget);
1773 }
1774
1775 // If we have to split PHI nodes of the entry or exit blocks, do so now.
1776 severSplitPHINodesOfEntry(Header&: header);
1777 severSplitPHINodesOfExits(Exits: ExitBlocks);
1778
1779 // This takes place of the original loop
1780 BasicBlock *codeReplacer = BasicBlock::Create(Context&: header->getContext(),
1781 Name: "codeRepl", Parent: oldFunction,
1782 InsertBefore: header);
1783 codeReplacer->IsNewDbgInfoFormat = oldFunction->IsNewDbgInfoFormat;
1784
1785 // The new function needs a root node because other nodes can branch to the
1786 // head of the region, but the entry node of a function cannot have preds.
1787 BasicBlock *newFuncRoot = BasicBlock::Create(Context&: header->getContext(),
1788 Name: "newFuncRoot");
1789 newFuncRoot->IsNewDbgInfoFormat = oldFunction->IsNewDbgInfoFormat;
1790
1791 auto *BranchI = BranchInst::Create(IfTrue: header);
1792 // If the original function has debug info, we have to add a debug location
1793 // to the new branch instruction from the artificial entry block.
1794 // We use the debug location of the first instruction in the extracted
1795 // blocks, as there is no other equivalent line in the source code.
1796 if (oldFunction->getSubprogram()) {
1797 any_of(Range&: Blocks, P: [&BranchI](const BasicBlock *BB) {
1798 return any_of(Range: *BB, P: [&BranchI](const Instruction &I) {
1799 if (!I.getDebugLoc())
1800 return false;
1801 // Don't use source locations attached to debug-intrinsics: they could
1802 // be from completely unrelated scopes.
1803 if (isa<DbgInfoIntrinsic>(Val: I))
1804 return false;
1805 BranchI->setDebugLoc(I.getDebugLoc());
1806 return true;
1807 });
1808 });
1809 }
1810 BranchI->insertInto(ParentBB: newFuncRoot, It: newFuncRoot->end());
1811
1812 ValueSet SinkingCands, HoistingCands;
1813 BasicBlock *CommonExit = nullptr;
1814 findAllocas(CEAC, SinkCands&: SinkingCands, HoistCands&: HoistingCands, ExitBlock&: CommonExit);
1815 assert(HoistingCands.empty() || CommonExit);
1816
1817 // Find inputs to, outputs from the code region.
1818 findInputsOutputs(Inputs&: inputs, Outputs&: outputs, SinkCands: SinkingCands);
1819
1820 // Now sink all instructions which only have non-phi uses inside the region.
1821 // Group the allocas at the start of the block, so that any bitcast uses of
1822 // the allocas are well-defined.
1823 AllocaInst *FirstSunkAlloca = nullptr;
1824 for (auto *II : SinkingCands) {
1825 if (auto *AI = dyn_cast<AllocaInst>(Val: II)) {
1826 AI->moveBefore(BB&: *newFuncRoot, I: newFuncRoot->getFirstInsertionPt());
1827 if (!FirstSunkAlloca)
1828 FirstSunkAlloca = AI;
1829 }
1830 }
1831 assert((SinkingCands.empty() || FirstSunkAlloca) &&
1832 "Did not expect a sink candidate without any allocas");
1833 for (auto *II : SinkingCands) {
1834 if (!isa<AllocaInst>(Val: II)) {
1835 cast<Instruction>(Val: II)->moveAfter(MovePos: FirstSunkAlloca);
1836 }
1837 }
1838
1839 if (!HoistingCands.empty()) {
1840 auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExitBlock: CommonExit);
1841 Instruction *TI = HoistToBlock->getTerminator();
1842 for (auto *II : HoistingCands)
1843 cast<Instruction>(Val: II)->moveBefore(MovePos: TI);
1844 }
1845
1846 // Collect objects which are inputs to the extraction region and also
1847 // referenced by lifetime start markers within it. The effects of these
1848 // markers must be replicated in the calling function to prevent the stack
1849 // coloring pass from merging slots which store input objects.
1850 ValueSet LifetimesStart;
1851 eraseLifetimeMarkersOnInputs(Blocks, SunkAllocas: SinkingCands, LifetimesStart);
1852
1853 // Construct new function based on inputs/outputs & add allocas for all defs.
1854 Function *newFunction =
1855 constructFunction(inputs, outputs, header, newRootNode: newFuncRoot, newHeader: codeReplacer,
1856 oldFunction, M: oldFunction->getParent());
1857
1858 // Update the entry count of the function.
1859 if (BFI) {
1860 auto Count = BFI->getProfileCountFromFreq(Freq: EntryFreq);
1861 if (Count)
1862 newFunction->setEntryCount(
1863 Count: ProfileCount(*Count, Function::PCT_Real)); // FIXME
1864 BFI->setBlockFreq(BB: codeReplacer, Freq: EntryFreq);
1865 }
1866
1867 CallInst *TheCall =
1868 emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);
1869
1870 moveCodeToFunction(newFunction);
1871
1872 // Replicate the effects of any lifetime start/end markers which referenced
1873 // input objects in the extraction region by placing markers around the call.
1874 insertLifetimeMarkersSurroundingCall(
1875 M: oldFunction->getParent(), LifetimesStart: LifetimesStart.getArrayRef(), LifetimesEnd: {}, TheCall);
1876
1877 // Propagate personality info to the new function if there is one.
1878 if (oldFunction->hasPersonalityFn())
1879 newFunction->setPersonalityFn(oldFunction->getPersonalityFn());
1880
1881 // Update the branch weights for the exit block.
1882 if (BFI && NumExitBlocks > 1)
1883 calculateNewCallTerminatorWeights(CodeReplacer: codeReplacer, ExitWeights, BPI);
1884
1885 // Loop over all of the PHI nodes in the header and exit blocks, and change
1886 // any references to the old incoming edge to be the new incoming edge.
1887 for (BasicBlock::iterator I = header->begin(); isa<PHINode>(Val: I); ++I) {
1888 PHINode *PN = cast<PHINode>(Val&: I);
1889 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1890 if (!Blocks.count(key: PN->getIncomingBlock(i)))
1891 PN->setIncomingBlock(i, BB: newFuncRoot);
1892 }
1893
1894 for (BasicBlock *ExitBB : ExitBlocks)
1895 for (PHINode &PN : ExitBB->phis()) {
1896 Value *IncomingCodeReplacerVal = nullptr;
1897 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
1898 // Ignore incoming values from outside of the extracted region.
1899 if (!Blocks.count(key: PN.getIncomingBlock(i)))
1900 continue;
1901
1902 // Ensure that there is only one incoming value from codeReplacer.
1903 if (!IncomingCodeReplacerVal) {
1904 PN.setIncomingBlock(i, BB: codeReplacer);
1905 IncomingCodeReplacerVal = PN.getIncomingValue(i);
1906 } else
1907 assert(IncomingCodeReplacerVal == PN.getIncomingValue(i) &&
1908 "PHI has two incompatbile incoming values from codeRepl");
1909 }
1910 }
1911
1912 fixupDebugInfoPostExtraction(OldFunc&: *oldFunction, NewFunc&: *newFunction, TheCall&: *TheCall);
1913
1914 LLVM_DEBUG(if (verifyFunction(*newFunction, &errs())) {
1915 newFunction->dump();
1916 report_fatal_error("verification of newFunction failed!");
1917 });
1918 LLVM_DEBUG(if (verifyFunction(*oldFunction))
1919 report_fatal_error("verification of oldFunction failed!"));
1920 LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC))
1921 report_fatal_error("Stale Asumption cache for old Function!"));
1922 return newFunction;
1923}
1924
1925bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
1926 const Function &NewFunc,
1927 AssumptionCache *AC) {
1928 for (auto AssumeVH : AC->assumptions()) {
1929 auto *I = dyn_cast_or_null<CallInst>(Val&: AssumeVH);
1930 if (!I)
1931 continue;
1932
1933 // There shouldn't be any llvm.assume intrinsics in the new function.
1934 if (I->getFunction() != &OldFunc)
1935 return true;
1936
1937 // There shouldn't be any stale affected values in the assumption cache
1938 // that were previously in the old function, but that have now been moved
1939 // to the new function.
1940 for (auto AffectedValVH : AC->assumptionsFor(V: I->getOperand(i_nocapture: 0))) {
1941 auto *AffectedCI = dyn_cast_or_null<CallInst>(Val&: AffectedValVH);
1942 if (!AffectedCI)
1943 continue;
1944 if (AffectedCI->getFunction() != &OldFunc)
1945 return true;
1946 auto *AssumedInst = cast<Instruction>(Val: AffectedCI->getOperand(i_nocapture: 0));
1947 if (AssumedInst->getFunction() != &OldFunc)
1948 return true;
1949 }
1950 }
1951 return false;
1952}
1953
// Record that \p Arg must be passed to the extracted function as a separate
// argument — presumably excluding it from the packed aggregate parameter
// built when arguments are aggregated; confirm against constructFunction.
void CodeExtractor::excludeArgFromAggregate(Value *Arg) {
  ExcludeArgsFromAggregate.insert(X: Arg);
}
1957

// Excerpt from the source code of llvm/lib/Transforms/Utils/CodeExtractor.cpp