//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//
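
// Illustrative example (not part of the pass): for a C function like the one
// below, the guard is placed between the saved return address and the buffer,
// so a linear overflow of the buffer corrupts the guard before it can reach
// the return address, and the epilogue check catches the corruption.
//
//   void copy(const char *S) {
//     char Buf[64];      // protectable character array
//     strcpy(Buf, S);    // an overflow here clobbers the guard first
//   }                    // epilogue compares the guard and aborts on mismatch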

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);
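
// Note: as hidden cl::opt flags, these can be toggled for experiments, e.g.
// "llc -enable-selectiondag-sp=false" or, from clang,
// "-mllvm -disable-check-noreturn-call" (flag spellings as registered above).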

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the original
///   value. It calls __stack_chk_fail if they differ.
static bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                                  DomTreeUpdater *DTU, bool &HasPrologue,
                                  bool &HasIRCheck);

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
static BasicBlock *CreateFailBB(Function *F, const Triple &Trip);

bool SSPLayoutInfo::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void SSPLayoutInfo::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}

SSPLayoutInfo SSPLayoutAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {

  SSPLayoutInfo Info;
  Info.RequireStackProtector =
      SSPLayoutAnalysis::requiresStackProtector(&F, &Info.Layout);
  Info.SSPBufferSize = F.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  return Info;
}

AnalysisKey SSPLayoutAnalysis::Key;

PreservedAnalyses StackProtectorPass::run(Function &F,
                                          FunctionAnalysisManager &FAM) {
  auto &Info = FAM.getResult<SSPLayoutAnalysis>(F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  if (!Info.RequireStackProtector)
    return PreservedAnalyses::all();
  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (F.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return PreservedAnalyses::all();
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors(TM, &F, DT ? &DTU : nullptr,
                                       Info.HasPrologue, Info.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DT || DT->verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<SSPLayoutAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  LayoutInfo.HasPrologue = false;
  LayoutInfo.HasIRCheck = false;

  LayoutInfo.SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &LayoutInfo.Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed =
      InsertStackProtectors(TM, F, DTU ? &*DTU : nullptr,
                            LayoutInfo.HasPrologue, LayoutInfo.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" (>= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
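///
/// Illustrative example (assuming ssp-buffer-size=8): for the type
/// "struct { int N; char Small[4]; char Big[16]; }", Big is a protectable
/// character array of 16 bytes (>= 8), so this returns true with IsLarge set;
/// Small alone would qualify only in strong mode.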
static bool ContainsProtectableArray(Type *Ty, Module *M,
                                     unsigned SSPBufferSize, bool &IsLarge,
                                     bool Strong, bool InStruct) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has more than SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode.
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

/// Check whether a stack allocation has its address taken.
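///
/// Illustrative IR (hypothetical names): each of the following counts as
/// taking the address of %buf and, under sspstrong, forces a protector:
///
///   store ptr %buf, ptr %escape       ; the pointer itself is stored
///   %i = ptrtoint ptr %buf to i64     ; the pointer is converted to an int
///   call void @use(ptr %buf)          ; the pointer escapes into a call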
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::getFixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::getFixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be integer; so if a pointer is being stored,
      // we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that
/// call alloca with either a variable size or a size >= SSPBufferSize,
/// functions with character buffers larger than SSPBufferSize, and functions
/// with aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic adds a guard variable to functions that call alloca
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
/// address taken.
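///
/// Illustrative example (assuming the default ssp-buffer-size of 8):
/// "char Buf[16]" triggers a protector under both heuristics, "char Buf[4]"
/// only under sspstrong, and a local whose address escapes (e.g. is passed to
/// another function) only under sspstrong via the address-taken rule.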
bool SSPLayoutAnalysis::requiresStackProtector(Function *F,
                                               SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's reference has
  // been taken. This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a load of the stack guard and record whether SelectionDAG-based
/// stack protection (SSP) is supported.
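///
/// For example, when the IR guard is a TLS-backed global (named
/// @__stack_chk_guard on many ELF targets), the load emitted below looks
/// roughly like:
///
///   %StackGuard = load volatile ptr, ptr @__stack_chk_guard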
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getPtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat awkward, since we optionally output whether we
  // should perform a SelectionDAG SP here. The reason is that it's strictly
  // defined as !TLI->getIRStackGuard(B), where getIRStackGuard is also
  // mutating. There is no way to get this bit without mutating the IR, so
  // getting this bit has to happen at the right time.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it actually conveys the same information getIRStackGuard() already
  // gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = PointerType::getUnqual(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                           DomTreeUpdater *DTU, bool &HasPrologue,
                           bool &HasIRCheck) {
  auto *M = F->getParent();
  auto *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();

  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is a stack-protector auto-generated check BB; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do the stack check before noreturn calls that aren't nounwind
          // (e.g. __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between,
    // so we don't need to worry about many situations here.
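    //
    // Illustrative IR: given
    //
    //   %R = tail call i32 @callee()
    //   ret i32 %R
    //
    // the guard comparison is inserted before the tail call rather than
    // between the call and the ret.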
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getPtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BBs into one,
      // including the fail BB generated by the stack protector pseudo
      // instruction.
      if (!FailBB)
        FailBB = CreateFailBB(F, TM->getTargetTriple());

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getPtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights, DTU,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return false if we didn't modify any basic blocks, i.e., no check
  // location (return statement or eligible noreturn call) was found in the
  // function.
  return HasPrologue;
}

BasicBlock *CreateFailBB(Function *F, const Triple &Trip) {
  auto *M = F->getParent();
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, /*Line=*/0, /*Column=*/0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          PointerType::getUnqual(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}
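
// For reference, the fail block created above looks roughly like this on most
// targets (OpenBSD instead calls __stack_smash_handler, passing the function
// name):
//
//   CallStackCheckFailBlk:
//     call void @__stack_chk_fail()
//     unreachable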