//== MemoryTaggingSupport.cpp - helpers for memory tagging implementations ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements common infrastructure for HWAddressSanitizer and
// Aarch64StackTagging.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

namespace llvm {
namespace memtag {
namespace {
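// Returns true if any instruction in Insts may be reachable from any other,
// i.e. the lifetime ends are not provably mutually exclusive. Conservatively
// returns true when Insts has more than MaxLifetimes entries, since the check
// below is quadratic in the number of instructions.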
bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
                                 const DominatorTree *DT, const LoopInfo *LI,
                                 size_t MaxLifetimes) {
  // If we have too many lifetime ends, give up, as the algorithm below is N^2.
  if (Insts.size() > MaxLifetimes)
    return true;
  for (size_t I = 0; I < Insts.size(); ++I) {
    for (size_t J = 0; J < Insts.size(); ++J) {
      if (I == J)
        continue;
      if (isPotentiallyReachable(Insts[I], Insts[J], nullptr, DT, LI))
        return true;
    }
  }
  return false;
}
} // namespace

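// Runs Callback at every location where the stack must be untagged for an
// alloca whose lifetime starts at Start and ends at the markers in Ends. If
// every reachable function exit in RetVec is covered by a lifetime end, the
// callback runs on the ends and the function returns true; otherwise it runs
// on the reachable exits instead, and the false return value tells the caller
// to remove the lifetime end markers for this alloca.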
bool forAllReachableExits(const DominatorTree &DT, const PostDominatorTree &PDT,
                          const LoopInfo &LI, const Instruction *Start,
                          const SmallVectorImpl<IntrinsicInst *> &Ends,
                          const SmallVectorImpl<Instruction *> &RetVec,
                          llvm::function_ref<void(Instruction *)> Callback) {
  if (Ends.size() == 1 && PDT.dominates(Ends[0], Start)) {
    Callback(Ends[0]);
    return true;
  }
  SmallPtrSet<BasicBlock *, 2> EndBlocks;
  for (auto *End : Ends) {
    EndBlocks.insert(End->getParent());
  }
  SmallVector<Instruction *, 8> ReachableRetVec;
  unsigned NumCoveredExits = 0;
  for (auto *RI : RetVec) {
    if (!isPotentiallyReachable(Start, RI, nullptr, &DT, &LI))
      continue;
    ReachableRetVec.push_back(RI);
    // If there is an end in the same basic block as the return, we know for
    // sure that the return is covered. Otherwise, we can check whether there
    // is a way to reach the RI from the start of the lifetime without passing
    // through an end.
    if (EndBlocks.contains(RI->getParent()) ||
        !isPotentiallyReachable(Start, RI, &EndBlocks, &DT, &LI)) {
      ++NumCoveredExits;
    }
  }
  if (NumCoveredExits == ReachableRetVec.size()) {
    for_each(Ends, Callback);
  } else {
    // If there's a mix of covered and non-covered exits, just place the
    // untagging on the exits, so we avoid the redundancy of untagging twice.
    for_each(ReachableRetVec, Callback);
    // We may have inserted untagging outside of the lifetime interval.
    // Signal the caller to remove the lifetime end call for this alloca.
    return false;
  }
  return true;
}

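// Returns true for a "standard" lifetime: the alloca has exactly one lifetime
// start, and at most one lifetime end executes in any run of the function.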
bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
                        const SmallVectorImpl<IntrinsicInst *> &LifetimeEnd,
                        const DominatorTree *DT, const LoopInfo *LI,
                        size_t MaxLifetimes) {
  // Multiple ends are acceptable only if they are pairwise unreachable from
  // each other, so that at most one of them is actually used for each
  // execution of the function.
  return LifetimeStart.size() == 1 &&
         (LifetimeEnd.size() == 1 ||
          (LifetimeEnd.size() > 0 &&
           !maybeReachableFromEachOther(LifetimeEnd, DT, LI, MaxLifetimes)));
}

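// If Inst exits the function, returns the instruction at which the stack
// should be untagged: the musttail call when the return is preceded by one
// (the untagging must happen before the tail call), otherwise Inst itself.
// Returns nullptr for instructions that do not exit the function.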
Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
  if (isa<ReturnInst>(Inst)) {
    if (CallInst *CI = Inst.getParent()->getTerminatingMustTailCall())
      return CI;
    return &Inst;
  }
  if (isa<ResumeInst, CleanupReturnInst>(Inst)) {
    return &Inst;
  }
  return nullptr;
}

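// Collects the per-function information needed for stack tagging: the
// interesting allocas together with their lifetime markers and debug records,
// whether any call may return twice, and the function exits at which allocas
// must be untagged.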
void StackInfoBuilder::visit(Instruction &Inst) {
  // Visit non-intrinsic debug-info records attached to Inst.
  for (DbgVariableRecord &DVR : filterDbgVars(Inst.getDbgRecordRange())) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVRVec = AInfo.DbgVariableRecords;
        if (DVRVec.empty() || DVRVec.back() != &DVR)
          DVRVec.push_back(&DVR);
      }
    };

    for_each(DVR.location_ops(), AddIfInteresting);
    if (DVR.isDbgAssign())
      AddIfInteresting(DVR.getAddress());
  }

  if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
    if (CI->canReturnTwice()) {
      Info.CallsReturnTwice = true;
    }
  }
  if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
    if (isInterestingAlloca(*AI)) {
      Info.AllocasToInstrument[AI].AI = AI;
    }
    return;
  }
  auto *II = dyn_cast<IntrinsicInst>(&Inst);
  if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
             II->getIntrinsicID() == Intrinsic::lifetime_end)) {
    AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
    if (!AI) {
      Info.UnrecognizedLifetimes.push_back(&Inst);
      return;
    }
    if (!isInterestingAlloca(*AI))
      return;
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
    else
      Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
    return;
  }
  if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
    auto AddIfInteresting = [&](Value *V) {
      if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
        if (!isInterestingAlloca(*AI))
          return;
        AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
        auto &DVIVec = AInfo.DbgVariableIntrinsics;
        if (DVIVec.empty() || DVIVec.back() != DVI)
          DVIVec.push_back(DVI);
      }
    };
    for_each(DVI->location_ops(), AddIfInteresting);
    if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(DVI))
      AddIfInteresting(DAI->getAddress());
  }

  Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
  if (ExitUntag)
    Info.RetVec.push_back(ExitUntag);
}

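// Determines whether AI needs to be instrumented; the individual criteria are
// explained inline below.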
bool StackInfoBuilder::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          memtag::getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError()) &&
         // safe allocas are not interesting
         !(SSI && SSI->isSafe(AI));
}

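// Returns the static size of AI in bytes. This is only meaningful for allocas
// whose size is known at compile time, which isInterestingAlloca guarantees.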
uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  const DataLayout &DL = AI.getModule()->getDataLayout();
  return *AI.getAllocationSize(DL);
}

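// Raises the alloca's alignment to at least Alignment and pads its size up to
// a multiple of Alignment by wrapping the allocated type in a struct with a
// trailing i8 array. The padded alloca replaces the original one, and Info.AI
// is updated to point at it.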
void alignAndPadAlloca(memtag::AllocaInfo &Info, llvm::Align Alignment) {
  const Align NewAlignment = std::max(Info.AI->getAlign(), Alignment);
  Info.AI->setAlignment(NewAlignment);
  auto &Ctx = Info.AI->getFunction()->getContext();

  uint64_t Size = getAllocaSizeInBytes(*Info.AI);
  uint64_t AlignedSize = alignTo(Size, Alignment);
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType = ArrayType::get(Type::getInt8Ty(Ctx), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  auto *NewAI = new AllocaInst(TypeWithPadding, Info.AI->getAddressSpace(),
                               nullptr, "", Info.AI->getIterator());
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlign());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  Value *NewPtr = NewAI;

  // TODO: Remove when typed pointers dropped
  if (Info.AI->getType() != NewAI->getType())
    NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "",
                             Info.AI->getIterator());

  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}

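// Returns true if V is a call to the lifetime.start or lifetime.end
// intrinsic.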
bool isLifetimeIntrinsic(Value *V) {
  auto *II = dyn_cast<IntrinsicInst>(V);
  return II && II->isLifetimeStartOrEnd();
}

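// Emits a call to the llvm.read_register intrinsic and returns the named
// target register's value as an intptr-sized integer.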
Value *readRegister(IRBuilder<> &IRB, StringRef Name) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Function *ReadRegister = Intrinsic::getDeclaration(
      M, Intrinsic::read_register, IRB.getIntPtrTy(M->getDataLayout()));
  MDNode *MD =
      MDNode::get(M->getContext(), {MDString::get(M->getContext(), Name)});
  Value *Args[] = {MetadataAsValue::get(M->getContext(), MD)};
  return IRB.CreateCall(ReadRegister, Args);
}

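// Returns a value standing in for the current program counter: the pc
// register on AArch64, and the address of the enclosing function elsewhere.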
Value *getPC(const Triple &TargetTriple, IRBuilder<> &IRB) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  if (TargetTriple.getArch() == Triple::aarch64)
    return memtag::readRegister(IRB, "pc");
  return IRB.CreatePtrToInt(IRB.GetInsertBlock()->getParent(),
                            IRB.getIntPtrTy(M->getDataLayout()));
}

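// Returns the frame pointer of the current function as an intptr-sized
// integer, obtained via llvm.frameaddress(0).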
Value *getFP(IRBuilder<> &IRB) {
  Function *F = IRB.GetInsertBlock()->getParent();
  Module *M = F->getParent();
  auto *GetFrameAddressFn = Intrinsic::getDeclaration(
      M, Intrinsic::frameaddress,
      IRB.getPtrTy(M->getDataLayout().getAllocaAddrSpace()));
  return IRB.CreatePtrToInt(
      IRB.CreateCall(GetFrameAddressFn,
                     {Constant::getNullValue(IRB.getInt32Ty())}),
      IRB.getIntPtrTy(M->getDataLayout()));
}

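// Returns a pointer to the given Android sanitizer TLS slot, computed as an
// offset of 8 * Slot bytes from the thread pointer (slots are 8 bytes wide).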
Value *getAndroidSlotPtr(IRBuilder<> &IRB, int Slot) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
  // in Bionic's libc/private/bionic_tls.h.
  Function *ThreadPointerFunc =
      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
  return IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
                                IRB.CreateCall(ThreadPointerFunc), 8 * Slot);
}

} // namespace memtag
} // namespace llvm
