//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains code dealing with the IR generation for cleanups
// and related information.
//
// A "cleanup" is a piece of code which needs to be executed whenever
// control transfers out of a particular scope. This can be
// conditionalized to occur only on exceptional control flow, only on
// normal control flow, or both.
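//
// For example, given
//
//   { std::string s; if (cond) return; }
//
// the destructor for 's' is a normal cleanup on the fall-through and
// return paths out of the block, and an EH cleanup on any exceptional
// path out of it.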
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

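/// Answer whether the given r-value might need to be spilled to a stack
/// slot so that it can be reloaded at a point that the current insertion
/// point does not dominate. Constants and function arguments, for
/// example, dominate everything and never need saving.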
bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
  if (rv.isScalar())
    return DominatingLLVMValue::needsSaving(rv.getScalarVal());
  if (rv.isAggregate())
    return DominatingLLVMValue::needsSaving(rv.getAggregatePointer());
  return true;
}

DominatingValue<RValue>::saved_type
DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
  if (rv.isScalar()) {
    llvm::Value *V = rv.getScalarVal();

    // These automatically dominate and don't need to be saved.
    if (!DominatingLLVMValue::needsSaving(V))
      return saved_type(V, nullptr, ScalarLiteral);

    // Everything else needs an alloca.
    Address addr =
        CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
    CGF.Builder.CreateStore(V, addr);
    return saved_type(addr.getPointer(), nullptr, ScalarAddress);
  }

  if (rv.isComplex()) {
    CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
    llvm::Type *ComplexTy =
        llvm::StructType::get(V.first->getType(), V.second->getType());
    Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
    CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
    CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
    return saved_type(addr.getPointer(), nullptr, ComplexAddress);
  }

  assert(rv.isAggregate());
  Address V = rv.getAggregateAddress(); // TODO: volatile?
  if (!DominatingLLVMValue::needsSaving(V.getPointer()))
    return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral,
                      V.getAlignment().getQuantity());

  Address addr =
      CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
  CGF.Builder.CreateStore(V.getPointer(), addr);
  return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress,
                    V.getAlignment().getQuantity());
}
/// Given a saved r-value produced by saved_type::save, emit the code
/// necessary to restore it to usability at the current insertion
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
  auto getSavingAddress = [&](llvm::Value *value) {
    auto *AI = cast<llvm::AllocaInst>(value);
    return Address(value, AI->getAllocatedType(),
                   CharUnits::fromQuantity(AI->getAlign().value()));
  };
  switch (K) {
  case ScalarLiteral:
    return RValue::get(Value);
  case ScalarAddress:
    return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
  case AggregateLiteral:
    return RValue::getAggregate(
        Address(Value, ElementType, CharUnits::fromQuantity(Align)));
  case AggregateAddress: {
    auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
    return RValue::getAggregate(
        Address(addr, ElementType, CharUnits::fromQuantity(Align)));
  }
  case ComplexAddress: {
    Address address = getSavingAddress(Value);
    llvm::Value *real =
        CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0));
    llvm::Value *imag =
        CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1));
    return RValue::getComplex(real, imag);
  }
  }

  llvm_unreachable("bad saved r-value kind");
}

/// Push an entry of the given size onto this protected-scope stack.
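/// The stack grows downward within its buffer: StartOfData moves toward
/// StartOfBuffer as entries are pushed, so the innermost scope always
/// sits at the lowest in-use address, and growing the buffer copies the
/// live data to the end of the new allocation.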
char *EHScopeStack::allocate(size_t Size) {
  Size = llvm::alignTo(Size, ScopeStackAlignment);
  if (!StartOfBuffer) {
    unsigned Capacity = 1024;
    while (Capacity < Size) Capacity *= 2;
    StartOfBuffer = new char[Capacity];
    StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
  } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
    unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
    unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);

    unsigned NewCapacity = CurrentCapacity;
    do {
      NewCapacity *= 2;
    } while (NewCapacity < UsedCapacity + Size);

    char *NewStartOfBuffer = new char[NewCapacity];
    char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
    char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
    memcpy(NewStartOfData, StartOfData, UsedCapacity);
    delete[] StartOfBuffer;
    StartOfBuffer = NewStartOfBuffer;
    EndOfBuffer = NewEndOfBuffer;
    StartOfData = NewStartOfData;
  }

  assert(StartOfBuffer + Size <= StartOfData);
  StartOfData -= Size;
  return StartOfData;
}

void EHScopeStack::deallocate(size_t Size) {
  StartOfData += llvm::alignTo(Size, ScopeStackAlignment);
}

bool EHScopeStack::containsOnlyLifetimeMarkers(
    EHScopeStack::stable_iterator Old) const {
  for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) {
    EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it);
    if (!cleanup || !cleanup->isLifetimeMarker())
      return false;
  }

  return true;
}

bool EHScopeStack::requiresLandingPad() const {
  for (stable_iterator si = getInnermostEHScope(); si != stable_end(); ) {
    // Skip lifetime markers.
    if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si)))
      if (cleanup->isLifetimeMarker()) {
        si = cleanup->getEnclosingEHScope();
        continue;
      }
    return true;
  }

  return false;
}

EHScopeStack::stable_iterator
EHScopeStack::getInnermostActiveNormalCleanup() const {
  for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
       si != se; ) {
    EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
    if (cleanup.isActive()) return si;
    si = cleanup.getEnclosingNormalCleanup();
  }
  return stable_end();
}

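/// Allocate space for a cleanup of the given kind and link the new scope
/// into the normal and/or EH cleanup chains. The caller is expected to
/// construct the cleanup object in the returned buffer (as the typed
/// pushCleanup<T> helpers in the header do).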
void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
  char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
  bool IsNormalCleanup = Kind & NormalCleanup;
  bool IsEHCleanup = Kind & EHCleanup;
  bool IsLifetimeMarker = Kind & LifetimeMarker;

  // Per C++ [except.terminate], it is implementation-defined whether none,
  // some, or all cleanups are called before std::terminate. Thus, when
  // terminate is the current EH scope, we may skip adding any EH cleanup
  // scopes.
  if (InnermostEHScope != stable_end() &&
      find(InnermostEHScope)->getKind() == EHScope::Terminate)
    IsEHCleanup = false;

  EHCleanupScope *Scope =
      new (Buffer) EHCleanupScope(IsNormalCleanup,
                                  IsEHCleanup,
                                  Size,
                                  BranchFixups.size(),
                                  InnermostNormalCleanup,
                                  InnermostEHScope);
  if (IsNormalCleanup)
    InnermostNormalCleanup = stable_begin();
  if (IsEHCleanup)
    InnermostEHScope = stable_begin();
  if (IsLifetimeMarker)
    Scope->setLifetimeMarker();

  // With Windows -EHa, invoke llvm.seh.scope.begin() for EH cleanups.
  // If exceptions are disabled/ignored and SEH is not in use, then there is no
  // invoke destination. SEH "works" even if exceptions are off. In practice,
  // this means that C++ destructors and other EH cleanups don't run, which is
  // consistent with MSVC's behavior, except in the presence of -EHa.
  // Check getInvokeDest() to generate llvm.seh.scope.begin() as needed.
  if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker &&
      CGF->getTarget().getCXXABI().isMicrosoft() && CGF->getInvokeDest())
    CGF->EmitSehCppScopeBegin();

  return Scope->getCleanupBuffer();
}

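/// Pop the innermost scope off the stack, which must be a cleanup,
/// restoring the enclosing-cleanup links and trimming any branch fixups
/// that can no longer be meaningful.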
void EHScopeStack::popCleanup() {
  assert(!empty() && "popping exception stack when not empty");

  assert(isa<EHCleanupScope>(*begin()));
  EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
  InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
  InnermostEHScope = Cleanup.getEnclosingEHScope();
  deallocate(Cleanup.getAllocatedSize());

  // Destroy the cleanup.
  Cleanup.Destroy();

  // Check whether we can shrink the branch-fixups stack.
  if (!BranchFixups.empty()) {
    // If we no longer have any normal cleanups, all the fixups are
    // complete.
    if (!hasNormalCleanups())
      BranchFixups.clear();

    // Otherwise we can still trim out unnecessary nulls.
    else
      popNullFixups();
  }
}

EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
  assert(getInnermostEHScope() == stable_end());
  char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
  EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
  InnermostEHScope = stable_begin();
  return filter;
}

void EHScopeStack::popFilter() {
  assert(!empty() && "popping exception stack when not empty");

  EHFilterScope &filter = cast<EHFilterScope>(*begin());
  deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters()));

  InnermostEHScope = filter.getEnclosingEHScope();
}

EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
  char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
  EHCatchScope *scope =
      new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
  InnermostEHScope = stable_begin();
  return scope;
}

void EHScopeStack::pushTerminate() {
  char *Buffer = allocate(EHTerminateScope::getSize());
  new (Buffer) EHTerminateScope(InnermostEHScope);
  InnermostEHScope = stable_begin();
}

/// Remove any 'null' fixups on the stack. However, we can't pop more
/// fixups than the fixup depth on the innermost normal cleanup, or
/// else fixups that we try to add to that cleanup will end up in the
/// wrong place. We *could* try to shrink fixup depths, but that's
/// actually a lot of work for little benefit.
void EHScopeStack::popNullFixups() {
  // We expect this to only be called when there's still an innermost
  // normal cleanup; otherwise there really shouldn't be any fixups.
  assert(hasNormalCleanups());

  EHScopeStack::iterator it = find(InnermostNormalCleanup);
  unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
  assert(BranchFixups.size() >= MinSize && "fixup stack out of order");

  while (BranchFixups.size() > MinSize &&
         BranchFixups.back().Destination == nullptr)
    BranchFixups.pop_back();
}

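/// Create an i1 flag that records whether a conditionally-evaluated
/// cleanup is actually live. For example, in "b ? f(T()) : g()" the
/// temporary T() is constructed only when 'b' is true, so its cleanup
/// must test a flag that is set exactly on the paths that create the
/// temporary.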
Address CodeGenFunction::createCleanupActiveFlag() {
  // Create a variable to decide whether the cleanup needs to be run.
  Address active = CreateTempAllocaWithoutCast(
      Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond");

  // Initialize it to false at a site that's guaranteed to be run
  // before each evaluation.
  setBeforeOutermostConditional(Builder.getFalse(), active);

  // Initialize it to true at the current location.
  Builder.CreateStore(Builder.getTrue(), active);

  return active;
}

void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) {
  // Set that as the active flag in the cleanup.
  EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
  assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?");
  cleanup.setActiveFlag(ActiveFlag);

  if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
  if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
}

void EHScopeStack::Cleanup::anchor() {}

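// Helpers for emitting a store or load at an arbitrary instruction
// rather than at the builder's current insertion point, while still
// honoring the address's known alignment.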
static void createStoreInstBefore(llvm::Value *value, Address addr,
                                  llvm::Instruction *beforeInst) {
  auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst);
  store->setAlignment(addr.getAlignment().getAsAlign());
}

static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name,
                                            llvm::Instruction *beforeInst) {
  return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name,
                            false, addr.getAlignment().getAsAlign(),
                            beforeInst);
}

/// All the branch fixups on the EH stack have propagated out past the
/// outermost normal cleanup; resolve them all by adding cases to the
/// given switch instruction.
static void ResolveAllBranchFixups(CodeGenFunction &CGF,
                                   llvm::SwitchInst *Switch,
                                   llvm::BasicBlock *CleanupEntry) {
  llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;

  for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination isn't set.
    BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
    if (Fixup.Destination == nullptr) continue;

    // If there isn't an OptimisticBranchBlock, then InitialBranch is
    // still pointing directly to its destination; forward it to the
    // appropriate cleanup entry. This is required in the specific
    // case of
    //   { std::string s; goto lbl; }
    //   lbl:
    // i.e. where there's an unresolved fixup inside a single cleanup
    // entry which we're currently popping.
    if (Fixup.OptimisticBranchBlock == nullptr) {
      createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex),
                            CGF.getNormalCleanupDestSlot(),
                            Fixup.InitialBranch);
      Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
    }

    // Don't add this case to the switch statement twice.
    if (!CasesAdded.insert(Fixup.Destination).second)
      continue;

    Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
                    Fixup.Destination);
  }

  CGF.EHStack.clearFixups();
}

/// Transitions the terminator of the given exit-block of a cleanup to
/// be a cleanup switch.
static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
                                                   llvm::BasicBlock *Block) {
  // If it's a branch, turn it into a switch whose default
  // destination is its original target.
  llvm::Instruction *Term = Block->getTerminator();
  assert(Term && "can't transition block without terminator");

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional());
    auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(),
                                     "cleanup.dest", Term);
    llvm::SwitchInst *Switch =
        llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
    Br->eraseFromParent();
    return Switch;
  } else {
    return cast<llvm::SwitchInst>(Term);
  }
}

void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
  assert(Block && "resolving a null target block");
  if (!EHStack.getNumBranchFixups()) return;

  assert(EHStack.hasNormalCleanups() &&
         "branch fixups exist with no normal cleanups on stack");

  llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
  bool ResolvedAny = false;

  for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
    // Skip this fixup if its destination doesn't match.
    BranchFixup &Fixup = EHStack.getBranchFixup(I);
    if (Fixup.Destination != Block) continue;

    Fixup.Destination = nullptr;
    ResolvedAny = true;

    // If it doesn't have an optimistic branch block, LatestBranch is
    // already pointing to the right place.
    llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
    if (!BranchBB)
      continue;

    // Don't process the same optimistic branch block twice.
    if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
      continue;

    llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);

    // Add a case to the switch.
    Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
  }

  if (ResolvedAny)
    EHStack.popNullFixups();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(
    EHScopeStack::stable_iterator Old,
    std::initializer_list<llvm::Value **> ValuesToReload) {
  assert(Old.isValid());

  bool HadBranches = false;
  while (EHStack.stable_begin() != Old) {
    EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
    HadBranches |= Scope.hasBranches();

    // As long as Old strictly encloses the scope's enclosing normal
    // cleanup, we're going to emit another normal cleanup which
    // fallthrough can propagate through.
    bool FallThroughIsBranchThrough =
        Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());

    PopCleanupBlock(FallThroughIsBranchThrough);
  }

  // If we didn't have any branches, the insertion point before cleanups must
  // dominate the current insertion point and we don't need to reload any
  // values.
  if (!HadBranches)
    return;

  // Spill and reload all values that the caller wants to be live at the
  // current insertion point.
  for (llvm::Value **ReloadedValue : ValuesToReload) {
    auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue);
    if (!Inst)
      continue;

    // Don't spill static allocas, they dominate all cleanups. These are
    // created by binding a reference to a local variable or temporary.
    auto *AI = dyn_cast<llvm::AllocaInst>(Inst);
    if (AI && AI->isStaticAlloca())
      continue;

    Address Tmp =
        CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup");

    // Find an insertion point after Inst and spill it to the temporary.
    llvm::BasicBlock::iterator InsertBefore;
    if (auto *Invoke = dyn_cast<llvm::InvokeInst>(Inst))
      InsertBefore = Invoke->getNormalDest()->getFirstInsertionPt();
    else
      InsertBefore = std::next(Inst->getIterator());
    CGBuilderTy(CGM, &*InsertBefore).CreateStore(Inst, Tmp);

    // Reload the value at the current insertion point.
    *ReloadedValue = Builder.CreateLoad(Tmp);
  }
}

/// Pops cleanup blocks until the given savepoint is reached, then adds the
/// cleanups recorded past that savepoint on the lifetime-extended cleanups
/// stack onto the EH stack.
void CodeGenFunction::PopCleanupBlocks(
    EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize,
    std::initializer_list<llvm::Value **> ValuesToReload) {
  PopCleanupBlocks(Old, ValuesToReload);

  // Move our deferred cleanups onto the EH stack.
  for (size_t I = OldLifetimeExtendedSize,
              E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
    // Alignment should be guaranteed by the vptrs in the individual cleanups.
    assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) &&
           "misaligned cleanup stack entry");

    LifetimeExtendedCleanupHeader &Header =
        reinterpret_cast<LifetimeExtendedCleanupHeader&>(
            LifetimeExtendedCleanupStack[I]);
    I += sizeof(Header);

    EHStack.pushCopyOfCleanup(Header.getKind(),
                              &LifetimeExtendedCleanupStack[I],
                              Header.getSize());
    I += Header.getSize();

    if (Header.isConditional()) {
      Address ActiveFlag =
          reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]);
      initFullExprCleanupWithFlag(ActiveFlag);
      I += sizeof(ActiveFlag);
    }
  }
  LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}

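/// Lazily create (and cache) the block through which normal control
/// flow enters the given cleanup.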
static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
                                           EHCleanupScope &Scope) {
  assert(Scope.isNormalCleanup());
  llvm::BasicBlock *Entry = Scope.getNormalBlock();
  if (!Entry) {
    Entry = CGF.createBasicBlock("cleanup");
    Scope.setNormalBlock(Entry);
  }
  return Entry;
}

/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup blocks.
///
/// Returns the new block, whatever it is.
static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
                                              llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return Entry;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return Entry;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Replace all uses of the entry with the predecessor, in case there
  // are phis in the cleanup.
  Entry->replaceAllUsesWith(Pred);

  // Merge the blocks.
  Pred->splice(Pred->end(), Entry);

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);

  return Pred;
}

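/// Emit a cleanup's code at the current insertion point. If the cleanup
/// has an active flag, the emitted code is guarded by a test of that
/// flag.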
static void EmitCleanup(CodeGenFunction &CGF,
                        EHScopeStack::Cleanup *Fn,
                        EHScopeStack::Cleanup::Flags flags,
                        Address ActiveFlag) {
  // If there's an active flag, load it and skip the cleanup if it's
  // false.
  llvm::BasicBlock *ContBB = nullptr;
  if (ActiveFlag.isValid()) {
    ContBB = CGF.createBasicBlock("cleanup.done");
    llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
    llvm::Value *IsActive =
        CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
    CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
    CGF.EmitBlock(CleanupBB);
  }

  // Ask the cleanup to emit itself.
  Fn->Emit(CGF, flags);
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");

  // Emit the continuation block if there was an active flag.
  if (ActiveFlag.isValid())
    CGF.EmitBlock(ContBB);
}

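/// Rewrite the terminator of Exit so that any edge currently going to
/// From goes to To instead.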
static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
                                          llvm::BasicBlock *From,
                                          llvm::BasicBlock *To) {
  // Exit is the exit block of a cleanup, so it always terminates in
  // an unconditional branch or a switch.
  llvm::Instruction *Term = Exit->getTerminator();

  if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
    assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
    Br->setSuccessor(0, To);
  } else {
    llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
    for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
      if (Switch->getSuccessor(I) == From)
        Switch->setSuccessor(I, To);
  }
}

/// We don't need a normal entry block for the given cleanup.
/// Optimistic fixup branches can cause these blocks to come into
/// existence anyway; if so, destroy it.
///
/// The validity of this transformation is very much specific to the
/// exact ways in which we form branches to cleanup entries.
static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
                                         EHCleanupScope &scope) {
  llvm::BasicBlock *entry = scope.getNormalBlock();
  if (!entry) return;

  // Replace all the uses with unreachable.
  llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
  for (llvm::BasicBlock::use_iterator
         i = entry->use_begin(), e = entry->use_end(); i != e; ) {
    llvm::Use &use = *i;
    ++i;

    use.set(unreachableBB);

    // The only uses should be fixup switches.
    llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
    if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
      // Replace the switch with a branch.
      llvm::BranchInst::Create(si->case_begin()->getCaseSuccessor(), si);

      // The switch operand is a load from the cleanup-dest alloca.
      llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());

      // Destroy the switch.
      si->eraseFromParent();

      // Destroy the load.
      assert(condition->getOperand(0) == CGF.NormalCleanupDest.getPointer());
      assert(condition->use_empty());
      condition->eraseFromParent();
    }
  }

  assert(entry->use_empty());
  delete entry;
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  Address NormalActiveFlag =
      Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag()
                                            : Address::invalid();
  Address EHActiveFlag =
      Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag()
                                        : Address::invalid();

  // Check whether we need an EH cleanup. This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope. The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
      (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;

    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block. If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out. This uses either a stack
  // array or malloc'd memory, depending on the size, which is
  // behavior that SmallVector would provide, if we could use it
  // here. Unfortunately, if you ask for a SmallVector<char>, the
  // alignment isn't sufficient.
  auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer());
  alignas(EHScopeStack::ScopeStackAlignment) char
      CleanupBufferStack[8 * sizeof(void *)];
  std::unique_ptr<char[]> CleanupBufferHeap;
  size_t CleanupSize = Scope.getCleanupSize();
  EHScopeStack::Cleanup *Fn;

  if (CleanupSize <= sizeof(CleanupBufferStack)) {
    memcpy(CleanupBufferStack, CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack);
  } else {
    CleanupBufferHeap.reset(new char[CleanupSize]);
    memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize);
    Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get());
  }

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  // Under -EHa, invoke seh.scope.end() to mark scope end before dtor.
  bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker();
  const EHPersonality &Personality = EHPersonality::get(*this);
  if (!RequiresNormalCleanup) {
    // Mark CPP scope end for passed-by-value Arg temp
    // per Windows ABI which is "normally" Cleanup in callee.
    if (IsEHa && getInvokeDest() && Builder.GetInsertBlock()) {
      if (Personality.isMSVCXXPersonality())
        EmitSehCppScopeEnd();
    }
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups &&
        !HasExistingBranches) {

      // Mark SEH scope end for fall-through flow.
      if (IsEHa && getInvokeDest()) {
        if (Personality.isMSVCXXPersonality())
          EmitSehCppScopeEnd();
        else
          EmitSehTryScopeEnd();
      }

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index. For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block. This implicitly branches to it if
      // we have fallthrough. All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // Intercept normal cleanup to mark SEH scope end.
      if (IsEHa && getInvokeDest()) {
        if (Personality.isMSVCXXPersonality())
          EmitSehCppScopeEnd();
        else
          EmitSehTryScopeEnd();
      }

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
          (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      // Skip for SEH, since ExitSwitch is used to generate code to indicate
      // abnormal termination. (SEH: except for _leave and fall-through at
      // the end, all other exits in a _try (return/goto/continue/break)
      // are considered abnormal terminations, and NormalCleanupDestSlot is
      // used to indicate an abnormal termination.)
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // Clean up the possibly dead store to the cleanup dest slot.
        llvm::Instruction *NormalCleanupDestSlot =
            cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer());
        if (NormalCleanupDestSlot->hasOneUse()) {
          NormalCleanupDestSlot->user_back()->eraseFromParent();
          NormalCleanupDestSlot->eraseFromParent();
          NormalCleanupDest = Address::invalid();
        }

        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
            (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        // Pass the abnormal exit flag to Fn (SEH cleanup).
        cleanupFlags.setHasExitSwitch();

        llvm::LoadInst *Load =
            createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest",
                                 nullptr);
        llvm::SwitchInst *Switch =
            llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        InstsToAppend[I]->insertInto(NormalExit, NormalExit->end());

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex),
                                getNormalCleanupDestSlot(),
                                Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.

      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
          SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
             I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent);

    // Push a terminate scope or cleanupendpad scope around the potentially
    // throwing cleanups. For funclet EH personalities, the cleanupendpad
    // models program termination when cleanups throw.
    bool PushedTerminate = false;
    SaveAndRestore RestoreCurrentFuncletPad(CurrentFuncletPad);
    llvm::CleanupPadInst *CPI = nullptr;

    const EHPersonality &Personality = EHPersonality::get(*this);
    if (Personality.usesFuncletPads()) {
      llvm::Value *ParentPad = CurrentFuncletPad;
      if (!ParentPad)
        ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext());
      CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad);
    }

    // Non-MSVC personalities need to terminate when an EH cleanup throws.
    if (!Personality.isMSVCPersonality()) {
      EHStack.pushTerminate();
      PushedTerminate = true;
    } else if (IsEHa && getInvokeDest()) {
      EmitSehCppScopeEnd();
    }

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag.isValid() || IsActive) {
      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    if (CPI)
      Builder.CreateCleanupRet(CPI, NextAction);
    else
      Builder.CreateBr(NextAction);

    // Leave the terminate scope.
    if (PushedTerminate)
      EHStack.popTerminate();

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}

/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
/// specified destination obviously has no cleanups to run. 'false' is always
/// a conservatively correct answer for this method.
bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
      EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
    return true;

  // Otherwise, we might need some cleanups.
  return false;
}


/// Terminate the current block by emitting a branch which might leave
/// the current cleanup-protected scope. The target scope may not yet
/// be known, in which case this will require a fixup.
///
/// As a side-effect, this method clears the insertion point.
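///
/// For example, a 'goto' or 'return' that exits scopes with pending
/// destructors is emitted through this method so that each intervening
/// cleanup runs before control reaches the destination.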
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
         && "stale jump destination");

  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());

  // Calculate the innermost active normal cleanup.
  EHScopeStack::stable_iterator TopCleanup =
      EHStack.getInnermostActiveNormalCleanup();

  // If we're not in an active normal cleanup scope, or if the
  // destination scope is within the innermost active normal cleanup
  // scope, we don't need to worry about fixups.
  if (TopCleanup == EHStack.stable_end() ||
      TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
    Builder.ClearInsertionPoint();
    return;
  }

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope as a branch fixup.
  if (!Dest.getScopeDepth().isValid()) {
    BranchFixup &Fixup = EHStack.addBranchFixup();
    Fixup.Destination = Dest.getBlock();
    Fixup.DestinationIndex = Dest.getDestIndex();
    Fixup.InitialBranch = BI;
    Fixup.OptimisticBranchBlock = nullptr;

    Builder.ClearInsertionPoint();
    return;
  }

  // Otherwise, thread through all the normal cleanups in scope.

  // Store the index at the start.
  llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
  createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI);

  // Adjust BI to point to the first cleanup block.
  {
    EHCleanupScope &Scope =
        cast<EHCleanupScope>(*EHStack.find(TopCleanup));
    BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
  }

  // Add this destination to all the scopes involved.
  EHScopeStack::stable_iterator I = TopCleanup;
  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
  if (E.strictlyEncloses(I)) {
    while (true) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
      assert(Scope.isNormalCleanup());
      I = Scope.getEnclosingNormalCleanup();

      // If this is the last cleanup we're propagating through, tell it
      // that there's a resolved jump moving through it.
      if (!E.strictlyEncloses(I)) {
        Scope.addBranchAfter(Index, Dest.getBlock());
        break;
      }

      // Otherwise, tell the scope that there's a jump propagating
      // through it. If this isn't new information, all the rest of
      // the work has been done before.
      if (!Scope.addBranchThrough(Dest.getBlock()))
        break;
    }
  }

  Builder.ClearInsertionPoint();
}

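/// Whether a normal entry block was ever required for the given cleanup,
/// or for any normal cleanup nested within it. If so, changing the
/// cleanup's activation state after the fact requires a runtime flag.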
static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
                                  EHScopeStack::stable_iterator C) {
  // If we needed a normal block for any reason, that counts.
  if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         I = EHStack.getInnermostNormalCleanup();
       I != C; ) {
    assert(C.strictlyEncloses(I));
    EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
    if (S.getNormalBlock()) return true;
    I = S.getEnclosingNormalCleanup();
  }

  return false;
}

static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
                              EHScopeStack::stable_iterator cleanup) {
  // If we needed an EH block for any reason, that counts.
  if (EHStack.find(cleanup)->hasEHBranches())
    return true;

  // Check whether any enclosed cleanups were needed.
  for (EHScopeStack::stable_iterator
         i = EHStack.getInnermostEHScope(); i != cleanup; ) {
    assert(cleanup.strictlyEncloses(i));

    EHScope &scope = *EHStack.find(i);
    if (scope.hasEHBranches())
      return true;

    i = scope.getEnclosingEHScope();
  }

  return false;
}

enum ForActivation_t {
  ForActivation,
  ForDeactivation
};

/// The given cleanup block is changing activation state. Configure a
/// cleanup variable if necessary.
///
/// It would be good if we had some way of determining if there were
/// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
                                        EHScopeStack::stable_iterator C,
                                        ForActivation_t kind,
                                        llvm::Instruction *dominatingIP) {
  EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));

  // We always need the flag if we're activating the cleanup in a
  // conditional context, because we have to assume that the current
  // location doesn't necessarily dominate the cleanup's code.
  bool isActivatedInConditional =
      (kind == ForActivation && CGF.isInConditionalBranch());

  bool needFlag = false;

  // Calculate whether the cleanup was used:

  //   - as a normal cleanup
  if (Scope.isNormalCleanup() &&
      (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInNormalCleanup();
    needFlag = true;
  }

  //   - as an EH cleanup
  if (Scope.isEHCleanup() &&
      (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
    Scope.setTestFlagInEHCleanup();
    needFlag = true;
  }

  // If it hasn't yet been used as either, we're done.
  if (!needFlag) return;

  Address var = Scope.getActiveFlag();
  if (!var.isValid()) {
    var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(),
                               "cleanup.isactive");
    Scope.setActiveFlag(var);

    assert(dominatingIP && "no existing variable and no dominating IP!");

    // Initialize to true or false depending on whether it was
    // active up to this point.
    llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation);

    // If we're in a conditional block, ignore the dominating IP and
    // use the outermost conditional branch.
    if (CGF.isInConditionalBranch()) {
      CGF.setBeforeOutermostConditional(value, var);
    } else {
      createStoreInstBefore(value, var, dominatingIP);
    }
  }

  CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
}

/// Activate a cleanup that was created in an inactivated state.
void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
                                           llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "activating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(!Scope.isActive() && "double activation");

  SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);

  Scope.setActive(true);
}

/// Deactivate a cleanup that was created in an active state.
void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
                                             llvm::Instruction *dominatingIP) {
  assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
  assert(Scope.isActive() && "double deactivation");

  // If it's the top of the stack, just pop it, but do so only if it belongs
  // to the current RunCleanupsScope.
  if (C == EHStack.stable_begin() &&
      CurrentCleanupScopeDepth.strictlyEncloses(C)) {
    // Per the comment below, checking EHAsynch is not strictly necessary;
    // it's there to assure zero impact when the EHAsynch option is off.
    if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) {
      PopCleanupBlock();
    } else {
      // If it's a normal cleanup, we need to pretend that the
      // fallthrough is unreachable.
      CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
      PopCleanupBlock();
      Builder.restoreIP(SavedIP);
    }
    return;
  }

  // Otherwise, follow the general case.
  SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);

  Scope.setActive(false);
}

Address CodeGenFunction::getNormalCleanupDestSlot() {
  if (!NormalCleanupDest.isValid())
    NormalCleanupDest =
        CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
  return NormalCleanupDest;
}

/// Emits all the code to cause the given temporary to be cleaned up.
void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
                                       QualType TempType,
                                       Address Ptr) {
  pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
              /*useEHCleanupForArray*/ true);
}

// Need to set "funclet" in OperandBundle properly for noThrow
// intrinsic (see CGCall.cpp).
static void EmitSehScope(CodeGenFunction &CGF,
                         llvm::FunctionCallee &SehCppScope) {
  llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
  assert(CGF.Builder.GetInsertBlock() && InvokeDest);
  llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      CGF.getBundlesForFunclet(SehCppScope.getCallee());
  if (CGF.CurrentFuncletPad)
    BundleList.emplace_back("funclet", CGF.CurrentFuncletPad);
  CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, std::nullopt,
                           BundleList);
  CGF.EmitBlock(Cont);
}

// Invoke a llvm.seh.scope.begin at the beginning of a CPP scope for -EHa.
void CodeGenFunction::EmitSehCppScopeBegin() {
  assert(getLangOpts().EHAsynch);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
  llvm::FunctionCallee SehCppScope =
      CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.begin");
  EmitSehScope(*this, SehCppScope);
}

// Invoke a llvm.seh.scope.end at the end of a CPP scope for -EHa.
// llvm.seh.scope.end is emitted before popCleanup, so it's "invoked".
void CodeGenFunction::EmitSehCppScopeEnd() {
  assert(getLangOpts().EHAsynch);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
  llvm::FunctionCallee SehCppScope =
      CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.end");
  EmitSehScope(*this, SehCppScope);
}

// Invoke a llvm.seh.try.begin at the beginning of a SEH scope for -EHa.
void CodeGenFunction::EmitSehTryScopeBegin() {
  assert(getLangOpts().EHAsynch);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
  llvm::FunctionCallee SehCppScope =
      CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.begin");
  EmitSehScope(*this, SehCppScope);
}

// Invoke a llvm.seh.try.end at the end of a SEH scope for -EHa.
void CodeGenFunction::EmitSehTryScopeEnd() {
  assert(getLangOpts().EHAsynch);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
  llvm::FunctionCallee SehCppScope =
      CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.end");
  EmitSehScope(*this, SehCppScope);
}