//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;

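/// Return true if \p Base's known alignment covers \p Alignment and the
/// constant \p Offset preserves it, i.e. Base + Offset is aligned to at least
/// \p Alignment.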
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  return BA >= Alignment && Offset.isAligned(BA);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI, Visited,
          MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

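  // Ask how many bytes are known dereferenceable from the value itself (e.g.
  // dereferenceable/dereferenceable_or_null attributes, allocas, globals).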
  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false; // Other assumes may have better information, so
                            // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

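  // Context queries below are made at the loop header, which dominates every
  // block in the loop.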
  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same.
  // For patterns with gaps (i.e. non unit stride), we are accessing EltSize
  // bytes at every Step.
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
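    // SCEV add expressions keep constant operands first, so operand 0 is the
    // constant offset and operand 1 the base when this is (base + constant).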
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case found which warrants
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make context-sensitive queries.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned> llvm::DefMaxInstsToScan(
    "available-load-scan-limit", cl::init(6), cl::Hidden,
    cl::desc("Use this to specify the default maximum number of instructions "
             "to scan backward from a given instruction, when searching for "
             "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
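  // Model each access as the half-open range [Offset, Offset + Size) and
  // check that the two ranges do not intersect.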
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
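    // (isAtomic() < AtLeastAtomic holds only when the candidate is non-atomic
    // but an atomic result is required.)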
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

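    // memset writes a single repeated byte; materialize the forwarded value by
    // splatting that byte to the load width (or truncating it for loads
    // narrower than one byte).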
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;

    return nullptr;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

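    // Undo the restore above so the backwards scan resumes from Inst.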
    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

bool llvm::canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                                     Instruction *CtxI) {
  Type *Ty = A->getType();
  assert(Ty == B->getType() && Ty->isPointerTy() &&
         "values must have matching pointer types");

  // NOTE: The checks in the function are incomplete and currently miss illegal
  // cases! The current implementation is a starting point and the
  // implementation should be made stricter over time.
  if (auto *C = dyn_cast<Constant>(B)) {
    // Do not allow replacing a pointer with a constant pointer, unless it is
    // either null or at least one byte is dereferenceable.
    APInt OneByte(DL.getPointerTypeSizeInBits(Ty), 1);
    return C->isNullValue() ||
           isDereferenceableAndAlignedPointer(B, Align(1), OneByte, DL, CtxI);
  }

  return true;
}