1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LazyValueInfo.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/Analysis/AssumptionCache.h"
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/TargetLibraryInfo.h"
21#include "llvm/Analysis/ValueLattice.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/AssemblyAnnotationWriter.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/ConstantRange.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instructions.h"
31#include "llvm/IR/IntrinsicInst.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/IR/PatternMatch.h"
35#include "llvm/IR/ValueHandle.h"
36#include "llvm/InitializePasses.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/FormattedStream.h"
39#include "llvm/Support/KnownBits.h"
40#include "llvm/Support/raw_ostream.h"
41#include <optional>
42using namespace llvm;
43using namespace PatternMatch;
44
45#define DEBUG_TYPE "lazy-value-info"
46
// This is the number of worklist items we will process to try to discover an
// answer for a given value. It throttles the lazy solver: see the comment in
// LazyValueInfoImpl::solve() for why overdefined results make this necessary.
static const unsigned MaxProcessedPerValue = 500;
50
char LazyValueInfoWrapperPass::ID = 0;
// Register this legacy pass with the global PassRegistry on construction so
// the legacy pass manager can resolve it by ID.
LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
  initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
}
// Legacy pass registration: LVI is an analysis (last arg 'true') and depends
// on assumption-cache and target-library-info analyses.
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)

namespace llvm {
  // Factory used by the legacy pass manager to create the wrapper pass.
  FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
}

// Unique identity token for the new-pass-manager analysis.
AnalysisKey LazyValueAnalysis::Key;
67
68/// Returns true if this lattice value represents at most one possible value.
69/// This is as precise as any lattice value can get while still representing
70/// reachable code.
71static bool hasSingleValue(const ValueLatticeElement &Val) {
72 if (Val.isConstantRange() &&
73 Val.getConstantRange().isSingleElement())
74 // Integer constants are single element ranges
75 return true;
76 if (Val.isConstant())
77 // Non integer constants
78 return true;
79 return false;
80}
81
82/// Combine two sets of facts about the same value into a single set of
83/// facts. Note that this method is not suitable for merging facts along
84/// different paths in a CFG; that's what the mergeIn function is for. This
85/// is for merging facts gathered about the same value at the same location
86/// through two independent means.
87/// Notes:
88/// * This method does not promise to return the most precise possible lattice
89/// value implied by A and B. It is allowed to return any lattice element
90/// which is at least as strong as *either* A or B (unless our facts
91/// conflict, see below).
92/// * Due to unreachable code, the intersection of two lattice values could be
93/// contradictory. If this happens, we return some valid lattice value so as
94/// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
95/// we do not make this guarantee. TODO: This would be a useful enhancement.
96static ValueLatticeElement intersect(const ValueLatticeElement &A,
97 const ValueLatticeElement &B) {
98 // Undefined is the strongest state. It means the value is known to be along
99 // an unreachable path.
100 if (A.isUnknown())
101 return A;
102 if (B.isUnknown())
103 return B;
104
105 // If we gave up for one, but got a useable fact from the other, use it.
106 if (A.isOverdefined())
107 return B;
108 if (B.isOverdefined())
109 return A;
110
111 // Can't get any more precise than constants.
112 if (hasSingleValue(Val: A))
113 return A;
114 if (hasSingleValue(Val: B))
115 return B;
116
117 // Could be either constant range or not constant here.
118 if (!A.isConstantRange() || !B.isConstantRange()) {
119 // TODO: Arbitrary choice, could be improved
120 return A;
121 }
122
123 // Intersect two constant ranges
124 ConstantRange Range =
125 A.getConstantRange().intersectWith(CR: B.getConstantRange());
126 // Note: An empty range is implicitly converted to unknown or undef depending
127 // on MayIncludeUndef internally.
128 return ValueLatticeElement::getRange(
129 CR: std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
130 B.isConstantRangeIncludingUndef());
131}
132
133//===----------------------------------------------------------------------===//
134// LazyValueInfoCache Decl
135//===----------------------------------------------------------------------===//
136
namespace {
  /// A callback value handle updates the cache when values are erased.
  class LazyValueInfoCache;
  struct LVIValueHandle final : public CallbackVH {
    // Owning cache; notified via eraseValue() when the tracked Value dies.
    LazyValueInfoCache *Parent;

    LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
        : CallbackVH(V), Parent(P) { }

    // Defined out of line, after LazyValueInfoCache is complete.
    void deleted() override;
    // Treat replace-all-uses-with like deletion: cached facts about the old
    // value must not be carried over to its replacement.
    void allUsesReplacedWith(Value *V) override {
      deleted();
    }
  };
} // end anonymous namespace
152
153namespace {
154 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
155
156 /// This is the cache kept by LazyValueInfo which
157 /// maintains information about queries across the clients' queries.
158 class LazyValueInfoCache {
159 /// This is all of the cached information for one basic block. It contains
160 /// the per-value lattice elements, as well as a separate set for
161 /// overdefined values to reduce memory usage. Additionally pointers
162 /// dereferenced in the block are cached for nullability queries.
163 struct BlockCacheEntry {
164 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
165 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
166 // std::nullopt indicates that the nonnull pointers for this basic block
167 // block have not been computed yet.
168 std::optional<NonNullPointerSet> NonNullPointers;
169 };
170
171 /// Cached information per basic block.
172 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
173 BlockCache;
174 /// Set of value handles used to erase values from the cache on deletion.
175 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
176
177 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
178 auto It = BlockCache.find_as(Val: BB);
179 if (It == BlockCache.end())
180 return nullptr;
181 return It->second.get();
182 }
183
184 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
185 auto It = BlockCache.find_as(Val: BB);
186 if (It == BlockCache.end())
187 It = BlockCache.insert(KV: { BB, std::make_unique<BlockCacheEntry>() })
188 .first;
189
190 return It->second.get();
191 }
192
193 void addValueHandle(Value *Val) {
194 auto HandleIt = ValueHandles.find_as(Val);
195 if (HandleIt == ValueHandles.end())
196 ValueHandles.insert(V: { Val, this });
197 }
198
199 public:
200 void insertResult(Value *Val, BasicBlock *BB,
201 const ValueLatticeElement &Result) {
202 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
203
204 // Insert over-defined values into their own cache to reduce memory
205 // overhead.
206 if (Result.isOverdefined())
207 Entry->OverDefined.insert(V: Val);
208 else
209 Entry->LatticeElements.insert(KV: { Val, Result });
210
211 addValueHandle(Val);
212 }
213
214 std::optional<ValueLatticeElement>
215 getCachedValueInfo(Value *V, BasicBlock *BB) const {
216 const BlockCacheEntry *Entry = getBlockEntry(BB);
217 if (!Entry)
218 return std::nullopt;
219
220 if (Entry->OverDefined.count(V))
221 return ValueLatticeElement::getOverdefined();
222
223 auto LatticeIt = Entry->LatticeElements.find_as(Val: V);
224 if (LatticeIt == Entry->LatticeElements.end())
225 return std::nullopt;
226
227 return LatticeIt->second;
228 }
229
230 bool isNonNullAtEndOfBlock(
231 Value *V, BasicBlock *BB,
232 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
233 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
234 if (!Entry->NonNullPointers) {
235 Entry->NonNullPointers = InitFn(BB);
236 for (Value *V : *Entry->NonNullPointers)
237 addValueHandle(Val: V);
238 }
239
240 return Entry->NonNullPointers->count(V);
241 }
242
243 /// clear - Empty the cache.
244 void clear() {
245 BlockCache.clear();
246 ValueHandles.clear();
247 }
248
249 /// Inform the cache that a given value has been deleted.
250 void eraseValue(Value *V);
251
252 /// This is part of the update interface to inform the cache
253 /// that a block has been deleted.
254 void eraseBlock(BasicBlock *BB);
255
256 /// Updates the cache to remove any influence an overdefined value in
257 /// OldSucc might have (unless also overdefined in NewSucc). This just
258 /// flushes elements from the cache and does not add any.
259 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
260 };
261}
262
263void LazyValueInfoCache::eraseValue(Value *V) {
264 for (auto &Pair : BlockCache) {
265 Pair.second->LatticeElements.erase(Val: V);
266 Pair.second->OverDefined.erase(V);
267 if (Pair.second->NonNullPointers)
268 Pair.second->NonNullPointers->erase(V);
269 }
270
271 auto HandleIt = ValueHandles.find_as(Val: V);
272 if (HandleIt != ValueHandles.end())
273 ValueHandles.erase(I: HandleIt);
274}
275
276void LVIValueHandle::deleted() {
277 // This erasure deallocates *this, so it MUST happen after we're done
278 // using any and all members of *this.
279 Parent->eraseValue(V: *this);
280}
281
282void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
283 BlockCache.erase(Val: BB);
284}
285
286void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
287 BasicBlock *NewSucc) {
288 // When an edge in the graph has been threaded, values that we could not
289 // determine a value for before (i.e. were marked overdefined) may be
290 // possible to solve now. We do NOT try to proactively update these values.
291 // Instead, we clear their entries from the cache, and allow lazy updating to
292 // recompute them when needed.
293
294 // The updating process is fairly simple: we need to drop cached info
295 // for all values that were marked overdefined in OldSucc, and for those same
296 // values in any successor of OldSucc (except NewSucc) in which they were
297 // also marked overdefined.
298 std::vector<BasicBlock*> worklist;
299 worklist.push_back(x: OldSucc);
300
301 const BlockCacheEntry *Entry = getBlockEntry(BB: OldSucc);
302 if (!Entry || Entry->OverDefined.empty())
303 return; // Nothing to process here.
304 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
305 Entry->OverDefined.end());
306
307 // Use a worklist to perform a depth-first search of OldSucc's successors.
308 // NOTE: We do not need a visited list since any blocks we have already
309 // visited will have had their overdefined markers cleared already, and we
310 // thus won't loop to their successors.
311 while (!worklist.empty()) {
312 BasicBlock *ToUpdate = worklist.back();
313 worklist.pop_back();
314
315 // Skip blocks only accessible through NewSucc.
316 if (ToUpdate == NewSucc) continue;
317
318 // If a value was marked overdefined in OldSucc, and is here too...
319 auto OI = BlockCache.find_as(Val: ToUpdate);
320 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
321 continue;
322 auto &ValueSet = OI->second->OverDefined;
323
324 bool changed = false;
325 for (Value *V : ValsToClear) {
326 if (!ValueSet.erase(V))
327 continue;
328
329 // If we removed anything, then we potentially need to update
330 // blocks successors too.
331 changed = true;
332 }
333
334 if (!changed) continue;
335
336 llvm::append_range(C&: worklist, R: successors(BB: ToUpdate));
337 }
338}
339
340namespace llvm {
namespace {
/// An assembly annotator class to print LazyValueCache information in
/// comments.
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  /// Print block-entry LVI facts as a comment before the block body.
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override;

  /// Print per-instruction LVI facts as a trailing comment.
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};
} // namespace
361// The actual implementation of the lazy analysis and update. Note that the
362// inheritance from LazyValueInfoCache is intended to be temporary while
363// splitting the code and then transitioning to a has-a relationship.
364class LazyValueInfoImpl {
365
366 /// Cached results from previous queries
367 LazyValueInfoCache TheCache;
368
369 /// This stack holds the state of the value solver during a query.
370 /// It basically emulates the callstack of the naive
371 /// recursive value lookup process.
372 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
373
374 /// Keeps track of which block-value pairs are in BlockValueStack.
375 DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
376
377 /// Push BV onto BlockValueStack unless it's already in there.
378 /// Returns true on success.
379 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
380 if (!BlockValueSet.insert(V: BV).second)
381 return false; // It's already in the stack.
382
383 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
384 << BV.first->getName() << "\n");
385 BlockValueStack.push_back(Elt: BV);
386 return true;
387 }
388
389 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
390 const DataLayout &DL; ///< A mandatory DataLayout
391
392 /// Declaration of the llvm.experimental.guard() intrinsic,
393 /// if it exists in the module.
394 Function *GuardDecl;
395
396 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
397 Instruction *CxtI);
398 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
399 BasicBlock *T,
400 Instruction *CxtI = nullptr);
401
402 // These methods process one work item and may add more. A false value
403 // returned means that the work item was not completely processed and must
404 // be revisited after going through the new items.
405 bool solveBlockValue(Value *Val, BasicBlock *BB);
406 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
407 BasicBlock *BB);
408 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
409 BasicBlock *BB);
410 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
411 BasicBlock *BB);
412 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
413 BasicBlock *BB);
414 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
415 BasicBlock *BB);
416 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
417 Instruction *I, BasicBlock *BB,
418 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
419 OpFn);
420 std::optional<ValueLatticeElement>
421 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
422 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
423 BasicBlock *BB);
424 std::optional<ValueLatticeElement>
425 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
426 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
427 BasicBlock *BB);
428 std::optional<ValueLatticeElement>
429 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
430 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
431 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
432 ValueLatticeElement &BBLV,
433 Instruction *BBI);
434
435 void solve();
436
437 // For the following methods, if UseBlockValue is true, the function may
438 // push additional values to the worklist and return nullopt. If
439 // UseBlockValue is false, it will never return nullopt.
440
441 std::optional<ValueLatticeElement>
442 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
443 const APInt &Offset, Instruction *CxtI,
444 bool UseBlockValue);
445
446 std::optional<ValueLatticeElement>
447 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
448 bool UseBlockValue);
449
450 std::optional<ValueLatticeElement>
451 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
452 bool UseBlockValue, unsigned Depth = 0);
453
454 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
455 BasicBlock *BBFrom,
456 BasicBlock *BBTo,
457 bool UseBlockValue);
458
459public:
460 /// This is the query interface to determine the lattice value for the
461 /// specified Value* at the context instruction (if specified) or at the
462 /// start of the block.
463 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
464 Instruction *CxtI = nullptr);
465
466 /// This is the query interface to determine the lattice value for the
467 /// specified Value* at the specified instruction using only information
468 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
469 /// recursive query is performed.
470 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
471
472 /// This is the query interface to determine the lattice
473 /// value for the specified Value* that is true on the specified edge.
474 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
475 BasicBlock *ToBB,
476 Instruction *CxtI = nullptr);
477
478 ValueLatticeElement getValueAtUse(const Use &U);
479
480 /// Complete flush all previously computed values
481 void clear() {
482 TheCache.clear();
483 }
484
485 /// Printing the LazyValueInfo Analysis.
486 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
487 LazyValueInfoAnnotatedWriter Writer(this, DTree);
488 F.print(OS, AAW: &Writer);
489 }
490
491 /// This is part of the update interface to remove information related to this
492 /// value from the cache.
493 void forgetValue(Value *V) { TheCache.eraseValue(V); }
494
495 /// This is part of the update interface to inform the cache
496 /// that a block has been deleted.
497 void eraseBlock(BasicBlock *BB) {
498 TheCache.eraseBlock(BB);
499 }
500
501 /// This is the update interface to inform the cache that an edge from
502 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
503 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
504
505 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
506 Function *GuardDecl)
507 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
508};
509} // namespace llvm
510
511void LazyValueInfoImpl::solve() {
512 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
513 BlockValueStack.begin(), BlockValueStack.end());
514
515 unsigned processedCount = 0;
516 while (!BlockValueStack.empty()) {
517 processedCount++;
518 // Abort if we have to process too many values to get a result for this one.
519 // Because of the design of the overdefined cache currently being per-block
520 // to avoid naming-related issues (IE it wants to try to give different
521 // results for the same name in different blocks), overdefined results don't
522 // get cached globally, which in turn means we will often try to rediscover
523 // the same overdefined result again and again. Once something like
524 // PredicateInfo is used in LVI or CVP, we should be able to make the
525 // overdefined cache global, and remove this throttle.
526 if (processedCount > MaxProcessedPerValue) {
527 LLVM_DEBUG(
528 dbgs() << "Giving up on stack because we are getting too deep\n");
529 // Fill in the original values
530 while (!StartingStack.empty()) {
531 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
532 TheCache.insertResult(Val: e.second, BB: e.first,
533 Result: ValueLatticeElement::getOverdefined());
534 StartingStack.pop_back();
535 }
536 BlockValueSet.clear();
537 BlockValueStack.clear();
538 return;
539 }
540 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
541 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
542 unsigned StackSize = BlockValueStack.size();
543 (void) StackSize;
544
545 if (solveBlockValue(Val: e.second, BB: e.first)) {
546 // The work item was completely processed.
547 assert(BlockValueStack.size() == StackSize &&
548 BlockValueStack.back() == e && "Nothing should have been pushed!");
549#ifndef NDEBUG
550 std::optional<ValueLatticeElement> BBLV =
551 TheCache.getCachedValueInfo(V: e.second, BB: e.first);
552 assert(BBLV && "Result should be in cache!");
553 LLVM_DEBUG(
554 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
555 << *BBLV << "\n");
556#endif
557
558 BlockValueStack.pop_back();
559 BlockValueSet.erase(V: e);
560 } else {
561 // More work needs to be done before revisiting.
562 assert(BlockValueStack.size() == StackSize + 1 &&
563 "Exactly one element should have been pushed!");
564 }
565 }
566}
567
568std::optional<ValueLatticeElement>
569LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
570 Instruction *CxtI) {
571 // If already a constant, there is nothing to compute.
572 if (Constant *VC = dyn_cast<Constant>(Val))
573 return ValueLatticeElement::get(C: VC);
574
575 if (std::optional<ValueLatticeElement> OptLatticeVal =
576 TheCache.getCachedValueInfo(V: Val, BB)) {
577 intersectAssumeOrGuardBlockValueConstantRange(Val, BBLV&: *OptLatticeVal, BBI: CxtI);
578 return OptLatticeVal;
579 }
580
581 // We have hit a cycle, assume overdefined.
582 if (!pushBlockValue(BV: { BB, Val }))
583 return ValueLatticeElement::getOverdefined();
584
585 // Yet to be resolved.
586 return std::nullopt;
587}
588
589static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
590 switch (BBI->getOpcode()) {
591 default: break;
592 case Instruction::Load:
593 case Instruction::Call:
594 case Instruction::Invoke:
595 if (MDNode *Ranges = BBI->getMetadata(KindID: LLVMContext::MD_range))
596 if (isa<IntegerType>(Val: BBI->getType())) {
597 return ValueLatticeElement::getRange(
598 CR: getConstantRangeFromMetadata(RangeMD: *Ranges));
599 }
600 break;
601 };
602 // Nothing known - will be intersected with other facts
603 return ValueLatticeElement::getOverdefined();
604}
605
606bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
607 assert(!isa<Constant>(Val) && "Value should not be constant");
608 assert(!TheCache.getCachedValueInfo(Val, BB) &&
609 "Value should not be in cache");
610
611 // Hold off inserting this value into the Cache in case we have to return
612 // false and come back later.
613 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
614 if (!Res)
615 // Work pushed, will revisit
616 return false;
617
618 TheCache.insertResult(Val, BB, Result: *Res);
619 return true;
620}
621
622std::optional<ValueLatticeElement>
623LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
624 Instruction *BBI = dyn_cast<Instruction>(Val);
625 if (!BBI || BBI->getParent() != BB)
626 return solveBlockValueNonLocal(Val, BB);
627
628 if (PHINode *PN = dyn_cast<PHINode>(Val: BBI))
629 return solveBlockValuePHINode(PN, BB);
630
631 if (auto *SI = dyn_cast<SelectInst>(Val: BBI))
632 return solveBlockValueSelect(S: SI, BB);
633
634 // If this value is a nonnull pointer, record it's range and bailout. Note
635 // that for all other pointer typed values, we terminate the search at the
636 // definition. We could easily extend this to look through geps, bitcasts,
637 // and the like to prove non-nullness, but it's not clear that's worth it
638 // compile time wise. The context-insensitive value walk done inside
639 // isKnownNonZero gets most of the profitable cases at much less expense.
640 // This does mean that we have a sensitivity to where the defining
641 // instruction is placed, even if it could legally be hoisted much higher.
642 // That is unfortunate.
643 PointerType *PT = dyn_cast<PointerType>(Val: BBI->getType());
644 if (PT && isKnownNonZero(V: BBI, DL))
645 return ValueLatticeElement::getNot(C: ConstantPointerNull::get(T: PT));
646
647 if (BBI->getType()->isIntegerTy()) {
648 if (auto *CI = dyn_cast<CastInst>(Val: BBI))
649 return solveBlockValueCast(CI, BB);
650
651 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Val: BBI))
652 return solveBlockValueBinaryOp(BBI: BO, BB);
653
654 if (auto *EVI = dyn_cast<ExtractValueInst>(Val: BBI))
655 return solveBlockValueExtractValue(EVI, BB);
656
657 if (auto *II = dyn_cast<IntrinsicInst>(Val: BBI))
658 return solveBlockValueIntrinsic(II, BB);
659 }
660
661 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
662 << "' - unknown inst def found.\n");
663 return getFromRangeMetadata(BBI);
664}
665
666static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
667 // TODO: Use NullPointerIsDefined instead.
668 if (Ptr->getType()->getPointerAddressSpace() == 0)
669 PtrSet.insert(V: getUnderlyingObject(V: Ptr));
670}
671
672static void AddNonNullPointersByInstruction(
673 Instruction *I, NonNullPointerSet &PtrSet) {
674 if (LoadInst *L = dyn_cast<LoadInst>(Val: I)) {
675 AddNonNullPointer(Ptr: L->getPointerOperand(), PtrSet);
676 } else if (StoreInst *S = dyn_cast<StoreInst>(Val: I)) {
677 AddNonNullPointer(Ptr: S->getPointerOperand(), PtrSet);
678 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Val: I)) {
679 if (MI->isVolatile()) return;
680
681 // FIXME: check whether it has a valuerange that excludes zero?
682 ConstantInt *Len = dyn_cast<ConstantInt>(Val: MI->getLength());
683 if (!Len || Len->isZero()) return;
684
685 AddNonNullPointer(Ptr: MI->getRawDest(), PtrSet);
686 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Val: MI))
687 AddNonNullPointer(Ptr: MTI->getRawSource(), PtrSet);
688 }
689}
690
691bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
692 if (NullPointerIsDefined(F: BB->getParent(),
693 AS: Val->getType()->getPointerAddressSpace()))
694 return false;
695
696 Val = Val->stripInBoundsOffsets();
697 return TheCache.isNonNullAtEndOfBlock(V: Val, BB, InitFn: [](BasicBlock *BB) {
698 NonNullPointerSet NonNullPointers;
699 for (Instruction &I : *BB)
700 AddNonNullPointersByInstruction(I: &I, PtrSet&: NonNullPointers);
701 return NonNullPointers;
702 });
703}
704
705std::optional<ValueLatticeElement>
706LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
707 ValueLatticeElement Result; // Start Undefined.
708
709 // If this is the entry block, we must be asking about an argument. The
710 // value is overdefined.
711 if (BB->isEntryBlock()) {
712 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
713 return ValueLatticeElement::getOverdefined();
714 }
715
716 // Loop over all of our predecessors, merging what we know from them into
717 // result. If we encounter an unexplored predecessor, we eagerly explore it
718 // in a depth first manner. In practice, this has the effect of discovering
719 // paths we can't analyze eagerly without spending compile times analyzing
720 // other paths. This heuristic benefits from the fact that predecessors are
721 // frequently arranged such that dominating ones come first and we quickly
722 // find a path to function entry. TODO: We should consider explicitly
723 // canonicalizing to make this true rather than relying on this happy
724 // accident.
725 for (BasicBlock *Pred : predecessors(BB)) {
726 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(V: Val, F: Pred, T: BB);
727 if (!EdgeResult)
728 // Explore that input, then return here
729 return std::nullopt;
730
731 Result.mergeIn(RHS: *EdgeResult);
732
733 // If we hit overdefined, exit early. The BlockVals entry is already set
734 // to overdefined.
735 if (Result.isOverdefined()) {
736 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
737 << "' - overdefined because of pred '"
738 << Pred->getName() << "' (non local).\n");
739 return Result;
740 }
741 }
742
743 // Return the merged value, which is more precise than 'overdefined'.
744 assert(!Result.isOverdefined());
745 return Result;
746}
747
748std::optional<ValueLatticeElement>
749LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
750 ValueLatticeElement Result; // Start Undefined.
751
752 // Loop over all of our predecessors, merging what we know from them into
753 // result. See the comment about the chosen traversal order in
754 // solveBlockValueNonLocal; the same reasoning applies here.
755 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
756 BasicBlock *PhiBB = PN->getIncomingBlock(i);
757 Value *PhiVal = PN->getIncomingValue(i);
758 // Note that we can provide PN as the context value to getEdgeValue, even
759 // though the results will be cached, because PN is the value being used as
760 // the cache key in the caller.
761 std::optional<ValueLatticeElement> EdgeResult =
762 getEdgeValue(V: PhiVal, F: PhiBB, T: BB, CxtI: PN);
763 if (!EdgeResult)
764 // Explore that input, then return here
765 return std::nullopt;
766
767 Result.mergeIn(RHS: *EdgeResult);
768
769 // If we hit overdefined, exit early. The BlockVals entry is already set
770 // to overdefined.
771 if (Result.isOverdefined()) {
772 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
773 << "' - overdefined because of pred (local).\n");
774
775 return Result;
776 }
777 }
778
779 // Return the merged value, which is more precise than 'overdefined'.
780 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
781 return Result;
782}
783
// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
    Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  // If no explicit context instruction was given, fall back to the
  // instruction defining Val (if any); without one there is no program point
  // to anchor assumes/guards to.
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  BasicBlock *BB = BBI->getParent();
  for (auto &AssumeVH : AC->assumptionsFor(V: Val)) {
    // Assumption handles may be null if the assume was deleted.
    if (!AssumeVH)
      continue;

    // Only check assumes in the block of the context instruction. Other
    // assumes will have already been taken into account when the value was
    // propagated from predecessor blocks.
    auto *I = cast<CallInst>(Val&: AssumeVH);
    if (I->getParent() != BB || !isValidAssumeForContext(I, CxtI: BBI))
      continue;

    // Refine BBLV with whatever the assumed condition implies about Val.
    BBLV = intersect(A: BBLV, B: *getValueFromCondition(Val, Cond: I->getArgOperand(i: 0),
                                                /*IsTrueDest*/ true,
                                                /*UseBlockValue*/ false));
  }

  // If guards are not used in the module, don't spend time looking for them
  if (GuardDecl && !GuardDecl->use_empty() &&
      BBI->getIterator() != BB->begin()) {
    // Walk backwards from just above BBI: only guards that execute before the
    // context instruction constrain the value at that point.
    for (Instruction &I :
         make_range(x: std::next(x: BBI->getIterator().getReverse()), y: BB->rend())) {
      Value *Cond = nullptr;
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
        BBLV = intersect(A: BBLV,
                         B: *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
                                                 /*UseBlockValue*/ false));
    }
  }

  if (BBLV.isOverdefined()) {
    // Check whether we're checking at the terminator, and the pointer has
    // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val: Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(C: ConstantPointerNull::get(T: PTy));
  }
}
831
832static ConstantRange toConstantRange(const ValueLatticeElement &Val,
833 Type *Ty, bool UndefAllowed = false) {
834 assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
835 if (Val.isConstantRange(UndefAllowed))
836 return Val.getConstantRange();
837 unsigned BW = Ty->getScalarSizeInBits();
838 if (Val.isUnknown())
839 return ConstantRange::getEmpty(BitWidth: BW);
840 return ConstantRange::getFull(BitWidth: BW);
841}
842
843std::optional<ValueLatticeElement>
844LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
845 // Recurse on our inputs if needed
846 std::optional<ValueLatticeElement> OptTrueVal =
847 getBlockValue(Val: SI->getTrueValue(), BB, CxtI: SI);
848 if (!OptTrueVal)
849 return std::nullopt;
850 ValueLatticeElement &TrueVal = *OptTrueVal;
851
852 std::optional<ValueLatticeElement> OptFalseVal =
853 getBlockValue(Val: SI->getFalseValue(), BB, CxtI: SI);
854 if (!OptFalseVal)
855 return std::nullopt;
856 ValueLatticeElement &FalseVal = *OptFalseVal;
857
858 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
859 const ConstantRange &TrueCR = toConstantRange(Val: TrueVal, Ty: SI->getType());
860 const ConstantRange &FalseCR = toConstantRange(Val: FalseVal, Ty: SI->getType());
861 Value *LHS = nullptr;
862 Value *RHS = nullptr;
863 SelectPatternResult SPR = matchSelectPattern(V: SI, LHS, RHS);
864 // Is this a min specifically of our two inputs? (Avoid the risk of
865 // ValueTracking getting smarter looking back past our immediate inputs.)
866 if (SelectPatternResult::isMinOrMax(SPF: SPR.Flavor) &&
867 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
868 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
869 ConstantRange ResultCR = [&]() {
870 switch (SPR.Flavor) {
871 default:
872 llvm_unreachable("unexpected minmax type!");
873 case SPF_SMIN: /// Signed minimum
874 return TrueCR.smin(Other: FalseCR);
875 case SPF_UMIN: /// Unsigned minimum
876 return TrueCR.umin(Other: FalseCR);
877 case SPF_SMAX: /// Signed maximum
878 return TrueCR.smax(Other: FalseCR);
879 case SPF_UMAX: /// Unsigned maximum
880 return TrueCR.umax(Other: FalseCR);
881 };
882 }();
883 return ValueLatticeElement::getRange(
884 CR: ResultCR, MayIncludeUndef: TrueVal.isConstantRangeIncludingUndef() ||
885 FalseVal.isConstantRangeIncludingUndef());
886 }
887
888 if (SPR.Flavor == SPF_ABS) {
889 if (LHS == SI->getTrueValue())
890 return ValueLatticeElement::getRange(
891 CR: TrueCR.abs(), MayIncludeUndef: TrueVal.isConstantRangeIncludingUndef());
892 if (LHS == SI->getFalseValue())
893 return ValueLatticeElement::getRange(
894 CR: FalseCR.abs(), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
895 }
896
897 if (SPR.Flavor == SPF_NABS) {
898 ConstantRange Zero(APInt::getZero(numBits: TrueCR.getBitWidth()));
899 if (LHS == SI->getTrueValue())
900 return ValueLatticeElement::getRange(
901 CR: Zero.sub(Other: TrueCR.abs()), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
902 if (LHS == SI->getFalseValue())
903 return ValueLatticeElement::getRange(
904 CR: Zero.sub(Other: FalseCR.abs()), MayIncludeUndef: FalseVal.isConstantRangeIncludingUndef());
905 }
906 }
907
908 // Can we constrain the facts about the true and false values by using the
909 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
910 // TODO: We could potentially refine an overdefined true value above.
911 Value *Cond = SI->getCondition();
912 // If the value is undef, a different value may be chosen in
913 // the select condition.
914 if (isGuaranteedNotToBeUndef(V: Cond, AC)) {
915 TrueVal =
916 intersect(A: TrueVal, B: *getValueFromCondition(Val: SI->getTrueValue(), Cond,
917 /*IsTrueDest*/ true,
918 /*UseBlockValue*/ false));
919 FalseVal =
920 intersect(A: FalseVal, B: *getValueFromCondition(Val: SI->getFalseValue(), Cond,
921 /*IsTrueDest*/ false,
922 /*UseBlockValue*/ false));
923 }
924
925 ValueLatticeElement Result = TrueVal;
926 Result.mergeIn(RHS: FalseVal);
927 return Result;
928}
929
930std::optional<ConstantRange>
931LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
932 std::optional<ValueLatticeElement> OptVal = getBlockValue(Val: V, BB, CxtI);
933 if (!OptVal)
934 return std::nullopt;
935 return toConstantRange(Val: *OptVal, Ty: V->getType());
936}
937
938std::optional<ValueLatticeElement>
939LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
940 // Filter out casts we don't know how to reason about before attempting to
941 // recurse on our operand. This can cut a long search short if we know we're
942 // not going to be able to get any useful information anways.
943 switch (CI->getOpcode()) {
944 case Instruction::Trunc:
945 case Instruction::SExt:
946 case Instruction::ZExt:
947 break;
948 default:
949 // Unhandled instructions are overdefined.
950 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
951 << "' - overdefined (unknown cast).\n");
952 return ValueLatticeElement::getOverdefined();
953 }
954
955 // Figure out the range of the LHS. If that fails, we still apply the
956 // transfer rule on the full set since we may be able to locally infer
957 // interesting facts.
958 std::optional<ConstantRange> LHSRes = getRangeFor(V: CI->getOperand(i_nocapture: 0), CxtI: CI, BB);
959 if (!LHSRes)
960 // More work to do before applying this transfer rule.
961 return std::nullopt;
962 const ConstantRange &LHSRange = *LHSRes;
963
964 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
965
966 // NOTE: We're currently limited by the set of operations that ConstantRange
967 // can evaluate symbolically. Enhancing that set will allows us to analyze
968 // more definitions.
969 return ValueLatticeElement::getRange(CR: LHSRange.castOp(CastOp: CI->getOpcode(),
970 BitWidth: ResultBitWidth));
971}
972
973std::optional<ValueLatticeElement>
974LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
975 Instruction *I, BasicBlock *BB,
976 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
977 OpFn) {
978 // Figure out the ranges of the operands. If that fails, use a
979 // conservative range, but apply the transfer rule anyways. This
980 // lets us pick up facts from expressions like "and i32 (call i32
981 // @foo()), 32"
982 std::optional<ConstantRange> LHSRes = getRangeFor(V: I->getOperand(i: 0), CxtI: I, BB);
983 if (!LHSRes)
984 return std::nullopt;
985
986 std::optional<ConstantRange> RHSRes = getRangeFor(V: I->getOperand(i: 1), CxtI: I, BB);
987 if (!RHSRes)
988 return std::nullopt;
989
990 const ConstantRange &LHSRange = *LHSRes;
991 const ConstantRange &RHSRange = *RHSRes;
992 return ValueLatticeElement::getRange(CR: OpFn(LHSRange, RHSRange));
993}
994
995std::optional<ValueLatticeElement>
996LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
997 assert(BO->getOperand(0)->getType()->isSized() &&
998 "all operands to binary operators are sized");
999 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Val: BO)) {
1000 unsigned NoWrapKind = 0;
1001 if (OBO->hasNoUnsignedWrap())
1002 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
1003 if (OBO->hasNoSignedWrap())
1004 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;
1005
1006 return solveBlockValueBinaryOpImpl(
1007 I: BO, BB,
1008 OpFn: [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1009 return CR1.overflowingBinaryOp(BinOp: BO->getOpcode(), Other: CR2, NoWrapKind);
1010 });
1011 }
1012
1013 return solveBlockValueBinaryOpImpl(
1014 I: BO, BB, OpFn: [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1015 return CR1.binaryOp(BinOp: BO->getOpcode(), Other: CR2);
1016 });
1017}
1018
1019std::optional<ValueLatticeElement>
1020LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1021 BasicBlock *BB) {
1022 return solveBlockValueBinaryOpImpl(
1023 I: WO, BB, OpFn: [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1024 return CR1.binaryOp(BinOp: WO->getBinaryOp(), Other: CR2);
1025 });
1026}
1027
1028std::optional<ValueLatticeElement>
1029LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1030 ValueLatticeElement MetadataVal = getFromRangeMetadata(BBI: II);
1031 if (!ConstantRange::isIntrinsicSupported(IntrinsicID: II->getIntrinsicID())) {
1032 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1033 << "' - unknown intrinsic.\n");
1034 return MetadataVal;
1035 }
1036
1037 SmallVector<ConstantRange, 2> OpRanges;
1038 for (Value *Op : II->args()) {
1039 std::optional<ConstantRange> Range = getRangeFor(V: Op, CxtI: II, BB);
1040 if (!Range)
1041 return std::nullopt;
1042 OpRanges.push_back(Elt: *Range);
1043 }
1044
1045 return intersect(A: ValueLatticeElement::getRange(CR: ConstantRange::intrinsic(
1046 IntrinsicID: II->getIntrinsicID(), Ops: OpRanges)),
1047 B: MetadataVal);
1048}
1049
1050std::optional<ValueLatticeElement>
1051LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1052 BasicBlock *BB) {
1053 if (auto *WO = dyn_cast<WithOverflowInst>(Val: EVI->getAggregateOperand()))
1054 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1055 return solveBlockValueOverflowIntrinsic(WO, BB);
1056
1057 // Handle extractvalue of insertvalue to allow further simplification
1058 // based on replaced with.overflow intrinsics.
1059 if (Value *V = simplifyExtractValueInst(
1060 Agg: EVI->getAggregateOperand(), Idxs: EVI->getIndices(),
1061 Q: EVI->getModule()->getDataLayout()))
1062 return getBlockValue(Val: V, BB, CxtI: EVI);
1063
1064 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1065 << "' - overdefined (unknown extractvalue).\n");
1066 return ValueLatticeElement::getOverdefined();
1067}
1068
1069static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1070 ICmpInst::Predicate Pred) {
1071 if (LHS == Val)
1072 return true;
1073
1074 // Handle range checking idiom produced by InstCombine. We will subtract the
1075 // offset from the allowed range for RHS in this case.
1076 const APInt *C;
1077 if (match(V: LHS, P: m_Add(L: m_Specific(V: Val), R: m_APInt(Res&: C)))) {
1078 Offset = *C;
1079 return true;
1080 }
1081
1082 // Handle the symmetric case. This appears in saturation patterns like
1083 // (x == 16) ? 16 : (x + 1).
1084 if (match(V: Val, P: m_Add(L: m_Specific(V: LHS), R: m_APInt(Res&: C)))) {
1085 Offset = -*C;
1086 return true;
1087 }
1088
1089 // If (x | y) < C, then (x < C) && (y < C).
1090 if (match(V: LHS, P: m_c_Or(L: m_Specific(V: Val), R: m_Value())) &&
1091 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1092 return true;
1093
1094 // If (x & y) > C, then (x > C) && (y > C).
1095 if (match(V: LHS, P: m_c_And(L: m_Specific(V: Val), R: m_Value())) &&
1096 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1097 return true;
1098
1099 return false;
1100}
1101
1102/// Get value range for a "(Val + Offset) Pred RHS" condition.
1103std::optional<ValueLatticeElement>
1104LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1105 Value *RHS,
1106 const APInt &Offset,
1107 Instruction *CxtI,
1108 bool UseBlockValue) {
1109 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1110 /*isFullSet=*/true);
1111 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val: RHS)) {
1112 RHSRange = ConstantRange(CI->getValue());
1113 } else if (UseBlockValue) {
1114 std::optional<ValueLatticeElement> R =
1115 getBlockValue(Val: RHS, BB: CxtI->getParent(), CxtI);
1116 if (!R)
1117 return std::nullopt;
1118 RHSRange = toConstantRange(Val: *R, Ty: RHS->getType());
1119 } else if (Instruction *I = dyn_cast<Instruction>(Val: RHS)) {
1120 if (auto *Ranges = I->getMetadata(KindID: LLVMContext::MD_range))
1121 RHSRange = getConstantRangeFromMetadata(RangeMD: *Ranges);
1122 }
1123
1124 ConstantRange TrueValues =
1125 ConstantRange::makeAllowedICmpRegion(Pred, Other: RHSRange);
1126 return ValueLatticeElement::getRange(CR: TrueValues.subtract(CI: Offset));
1127}
1128
1129static std::optional<ConstantRange>
1130getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1131 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1132 bool Invert = false;
1133 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1134 Pred = ICmpInst::getInversePredicate(pred: Pred);
1135 Invert = true;
1136 }
1137 if (Pred == ICmpInst::ICMP_SLE) {
1138 Pred = ICmpInst::ICMP_SLT;
1139 if (RHS.isMaxSignedValue())
1140 return std::nullopt; // Could also return full/empty here, if we wanted.
1141 ++RHS;
1142 }
1143 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1144 if (auto CR = Fn(RHS))
1145 return Invert ? CR->inverse() : CR;
1146 return std::nullopt;
1147}
1148
/// Compute the lattice value implied for \p Val by the comparison \p ICI,
/// given that execution follows the edge on which the comparison is true
/// (\p isTrueDest) or false. Returns overdefined when nothing can be
/// inferred; returns std::nullopt only when \p UseBlockValue is set and a
/// required block value is not yet solved.
std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
    Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
  Value *LHS = ICI->getOperand(i_nocapture: 0);
  Value *RHS = ICI->getOperand(i_nocapture: 1);

  // Get the predicate that must hold along the considered edge.
  CmpInst::Predicate EdgePred =
      isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();

  // Val ==/!= Constant pins the value exactly (or excludes one constant).
  if (isa<Constant>(Val: RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(C: cast<Constant>(Val: RHS));
      else if (!isa<UndefValue>(Val: RHS))
        // "Val != undef" carries no information, hence the undef exclusion.
        return ValueLatticeElement::getNot(C: cast<Constant>(Val: RHS));
    }
  }

  Type *Ty = Val->getType();
  if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  // Try "(Val + Offset) Pred RHS" directly, then the swapped orientation.
  unsigned BitWidth = Ty->getScalarSizeInBits();
  APInt Offset(BitWidth, 0);
  if (matchICmpOperand(Offset, LHS, Val, Pred: EdgePred))
    return getValueFromSimpleICmpCondition(Pred: EdgePred, RHS, Offset, CxtI: ICI,
                                           UseBlockValue);

  CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(pred: EdgePred);
  if (matchICmpOperand(Offset, LHS: RHS, Val, Pred: SwappedPred))
    return getValueFromSimpleICmpCondition(Pred: SwappedPred, RHS: LHS, Offset, CxtI: ICI,
                                           UseBlockValue);

  const APInt *Mask, *C;
  if (match(V: LHS, P: m_And(L: m_Specific(V: Val), R: m_APInt(Res&: Mask))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    // If (Val & Mask) == C then all the masked bits are known and we can
    // compute a value range based on that.
    if (EdgePred == ICmpInst::ICMP_EQ) {
      KnownBits Known;
      Known.Zero = ~*C & *Mask;
      Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          CR: ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
    }
    // If (Val & Mask) != 0 then the value must be larger than the lowest set
    // bit of Mask.
    if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
      return ValueLatticeElement::getRange(CR: ConstantRange::getNonEmpty(
          Lower: APInt::getOneBitSet(numBits: BitWidth, BitNo: Mask->countr_zero()),
          Upper: APInt::getZero(numBits: BitWidth)));
    }
  }

  // If (X urem Modulus) >= C, then X >= C.
  // If trunc X >= C, then X >= C.
  // TODO: An upper bound could be computed as well.
  if (match(V: LHS, P: m_CombineOr(L: m_URem(L: m_Specific(V: Val), R: m_Value()),
                              R: m_Trunc(Op: m_Specific(V: Val)))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    // Use the icmp region so we don't have to deal with different predicates.
    ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred: EdgePred, Other: *C);
    if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(CR: ConstantRange::getNonEmpty(
          Lower: CR.getUnsignedMin().zext(width: BitWidth), Upper: APInt(BitWidth, 0)));
  }

  // Recognize:
  // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
  // Preconditions: (C << ShAmtC) >> ShAmtC == C
  const APInt *ShAmtC;
  if (CmpInst::isSigned(predicate: EdgePred) &&
      match(V: LHS, P: m_AShr(L: m_Specific(V: Val), R: m_APInt(Res&: ShAmtC))) &&
      match(V: RHS, P: m_APInt(Res&: C))) {
    auto CR = getRangeViaSLT(
        Pred: EdgePred, RHS: *C, Fn: [&](const APInt &RHS) -> std::optional<ConstantRange> {
          APInt New = RHS << *ShAmtC;
          // Reject bounds that don't round-trip through the shift (overflow).
          if ((New.ashr(ShiftAmt: *ShAmtC)) != RHS)
            return std::nullopt;
          return ConstantRange::getNonEmpty(
              Lower: APInt::getSignedMinValue(numBits: New.getBitWidth()), Upper: New);
        });
    if (CR)
      return ValueLatticeElement::getRange(CR: *CR);
  }

  return ValueLatticeElement::getOverdefined();
}
1237
1238// Handle conditions of the form
1239// extractvalue(op.with.overflow(%x, C), 1).
1240static ValueLatticeElement getValueFromOverflowCondition(
1241 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1242 // TODO: This only works with a constant RHS for now. We could also compute
1243 // the range of the RHS, but this doesn't fit into the current structure of
1244 // the edge value calculation.
1245 const APInt *C;
1246 if (WO->getLHS() != Val || !match(V: WO->getRHS(), P: m_APInt(Res&: C)))
1247 return ValueLatticeElement::getOverdefined();
1248
1249 // Calculate the possible values of %x for which no overflow occurs.
1250 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1251 BinOp: WO->getBinaryOp(), Other: *C, NoWrapKind: WO->getNoWrapKind());
1252
1253 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1254 // constrained to it's inverse (all values that might cause overflow).
1255 if (IsTrueDest)
1256 NWR = NWR.inverse();
1257 return ValueLatticeElement::getRange(CR: NWR);
1258}
1259
/// Compute the lattice value implied for \p Val by an arbitrary boolean
/// condition \p Cond being true (\p IsTrueDest) or false. Handles icmp,
/// overflow-bit extracts, logical not, and logical and/or up to
/// MaxAnalysisRecursionDepth; anything else is overdefined.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
                                         bool IsTrueDest, bool UseBlockValue,
                                         unsigned Depth) {
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Val: Cond))
    return getValueFromICmpCondition(Val, ICI, isTrueDest: IsTrueDest, UseBlockValue);

  // extractvalue(op.with.overflow(...), 1) is the overflow bit.
  if (auto *EVI = dyn_cast<ExtractValueInst>(Val: Cond))
    if (auto *WO = dyn_cast<WithOverflowInst>(Val: EVI->getAggregateOperand()))
      if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
        return getValueFromOverflowCondition(Val, WO, IsTrueDest);

  // Bound the recursion through not/and/or below.
  if (++Depth == MaxAnalysisRecursionDepth)
    return ValueLatticeElement::getOverdefined();

  // !N being true is N being false, and vice versa.
  Value *N;
  if (match(V: Cond, P: m_Not(V: m_Value(V&: N))))
    return getValueFromCondition(Val, Cond: N, IsTrueDest: !IsTrueDest, UseBlockValue, Depth);

  Value *L, *R;
  bool IsAnd;
  if (match(V: Cond, P: m_LogicalAnd(L: m_Value(V&: L), R: m_Value(V&: R))))
    IsAnd = true;
  else if (match(V: Cond, P: m_LogicalOr(L: m_Value(V&: L), R: m_Value(V&: R))))
    IsAnd = false;
  else
    return ValueLatticeElement::getOverdefined();

  std::optional<ValueLatticeElement> LV =
      getValueFromCondition(Val, Cond: L, IsTrueDest, UseBlockValue, Depth);
  if (!LV)
    return std::nullopt;
  std::optional<ValueLatticeElement> RV =
      getValueFromCondition(Val, Cond: R, IsTrueDest, UseBlockValue, Depth);
  if (!RV)
    return std::nullopt;

  // if (L && R) -> intersect L and R
  // if (!(L || R)) -> intersect !L and !R
  // if (L || R) -> union L and R
  // if (!(L && R)) -> union !L and !R
  if (IsTrueDest ^ IsAnd) {
    LV->mergeIn(RHS: *RV);
    return *LV;
  }

  return intersect(A: *LV, B: *RV);
}
1308
1309// Return true if Usr has Op as an operand, otherwise false.
1310static bool usesOperand(User *Usr, Value *Op) {
1311 return is_contained(Range: Usr->operands(), Element: Op);
1312}
1313
1314// Return true if the instruction type of Val is supported by
1315// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1316// Call this before calling constantFoldUser() to find out if it's even worth
1317// attempting to call it.
1318static bool isOperationFoldable(User *Usr) {
1319 return isa<CastInst>(Val: Usr) || isa<BinaryOperator>(Val: Usr) || isa<FreezeInst>(Val: Usr);
1320}
1321
// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal. If so, return it as an
// lattice value range with a single element or otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  // Materialize OpConstVal as a constant of Op's type for InstSimplify.
  Constant* OpConst = Constant::getIntegerValue(Ty: Op->getType(), V: OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Val: Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            Val: simplifyCastInst(CastOpc: CI->getOpcode(), Op: OpConst,
                               Ty: CI->getDestTy(), Q: DL))) {
      return ValueLatticeElement::getRange(CR: ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Val: Usr)) {
    // Op may appear on either side (or both); substitute wherever it matches.
    bool Op0Match = BO->getOperand(i_nocapture: 0) == Op;
    bool Op1Match = BO->getOperand(i_nocapture: 1) == Op;
    assert((Op0Match || Op1Match) &&
           "Operand 0 nor Operand 1 isn't a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(i_nocapture: 0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(i_nocapture: 1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            Val: simplifyBinOp(Opcode: BO->getOpcode(), LHS, RHS, Q: DL))) {
      return ValueLatticeElement::getRange(CR: ConstantRange(C->getValue()));
    }
  } else if (isa<FreezeInst>(Val: Usr)) {
    // freeze of a known constant is that constant.
    assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
    return ValueLatticeElement::getRange(CR: ConstantRange(OpConstVal));
  }
  return ValueLatticeElement::getOverdefined();
}
1356
/// Compute the value of Val on the edge BBFrom -> BBTo.
///
/// Handles two terminator kinds: conditional branches (infer from the branch
/// condition, or constant-fold Val when it is computed from the condition)
/// and switches (accumulate the case values routed to BBTo). Returns
/// std::nullopt only when \p UseBlockValue is set and a required block value
/// is not yet solved.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
                                     BasicBlock *BBTo, bool UseBlockValue) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(Val: BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo, then
    // we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(i: 0) != BI->getSuccessor(i: 1)) {
      bool isTrueDest = BI->getSuccessor(i: 0) == BBTo;
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      if (Condition == Val)
        return ValueLatticeElement::get(C: ConstantInt::get(
                              Ty: Type::getInt1Ty(C&: Val->getContext()), V: isTrueDest));

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      std::optional<ValueLatticeElement> Result =
          getValueFromCondition(Val, Cond: Condition, IsTrueDest: isTrueDest, UseBlockValue);
      if (!Result)
        return std::nullopt;

      if (!Result->isOverdefined())
        return Result;

      // The condition didn't directly constrain Val; try constant-folding Val
      // itself using facts the edge establishes about its operands.
      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result->isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Val: Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getModule()->getDataLayout();
          if (usesOperand(Usr, Op: Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            //   ; %Val is true on the edge to %then.
            //   %Val = and i1 %Condition, true.
            //   br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Op: Condition, OpConstVal: ConditionVal, DL);
          } else {
            // If one of Val's operand has an inferred value, we may be able to
            // infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal = *getValueFromCondition(
                  Val: Op, Cond: Condition, IsTrueDest: isTrueDest, /*UseBlockValue*/ false);
              if (std::optional<APInt> OpConst =
                      OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, OpConstVal: *OpConst, DL);
                break;
              }
            }
          }
        }
      }
      if (!Result->isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(Val: BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val: Val->getType()))
      return ValueLatticeElement::getOverdefined();
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Op: Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return ValueLatticeElement::getOverdefined();
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    // For the default destination start from the full set and carve out case
    // values; for a case destination start empty and accumulate matches.
    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Op: Condition, OpConstVal: CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return ValueLatticeElement::getOverdefined();
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo. In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue). For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(CR: EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(CR: EdgeVal);
    }
    return ValueLatticeElement::getRange(CR: std::move(EdgesVals));
  }
  return ValueLatticeElement::getOverdefined();
}
1482
/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
/// the basic block if the edge does not constrain Val.
///
/// Returns std::nullopt when a required block value has not been solved yet;
/// the caller must solve() and retry.
std::optional<ValueLatticeElement>
LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
                                BasicBlock *BBTo, Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(C: VC);

  // First derive what the edge itself implies about Val.
  std::optional<ValueLatticeElement> LocalResult =
      getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
  if (!LocalResult)
    return std::nullopt;

  if (hasSingleValue(Val: *LocalResult))
    // Can't get any more precise here
    return LocalResult;

  // Otherwise intersect the edge fact with Val's value at the end of BBFrom.
  std::optional<ValueLatticeElement> OptInBlock =
      getBlockValue(Val, BB: BBFrom, CxtI: BBFrom->getTerminator());
  if (!OptInBlock)
    return std::nullopt;
  ValueLatticeElement &InBlock = *OptInBlock;

  // We can use the context instruction (generically the ultimate instruction
  // the calling pass is trying to simplify) here, even though the result of
  // this function is generally cached when called from the solve* functions
  // (and that cached result might be used with queries using a different
  // context instruction), because when this function is called from the solve*
  // functions, the context instruction is not provided. When called from
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
  // but then the result is not cached.
  intersectAssumeOrGuardBlockValueConstantRange(Val, BBLV&: InBlock, BBI: CxtI);

  return intersect(A: *LocalResult, B: InBlock);
}
1519
1520ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1521 Instruction *CxtI) {
1522 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1523 << BB->getName() << "'\n");
1524
1525 assert(BlockValueStack.empty() && BlockValueSet.empty());
1526 std::optional<ValueLatticeElement> OptResult = getBlockValue(Val: V, BB, CxtI);
1527 if (!OptResult) {
1528 solve();
1529 OptResult = getBlockValue(Val: V, BB, CxtI);
1530 assert(OptResult && "Value not available after solving");
1531 }
1532
1533 ValueLatticeElement Result = *OptResult;
1534 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1535 return Result;
1536}
1537
1538ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1539 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1540 << "'\n");
1541
1542 if (auto *C = dyn_cast<Constant>(Val: V))
1543 return ValueLatticeElement::get(C);
1544
1545 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1546 if (auto *I = dyn_cast<Instruction>(Val: V))
1547 Result = getFromRangeMetadata(BBI: I);
1548 intersectAssumeOrGuardBlockValueConstantRange(Val: V, BBLV&: Result, BBI: CxtI);
1549
1550 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1551 return Result;
1552}
1553
1554ValueLatticeElement LazyValueInfoImpl::
1555getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1556 Instruction *CxtI) {
1557 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1558 << FromBB->getName() << "' to '" << ToBB->getName()
1559 << "'\n");
1560
1561 std::optional<ValueLatticeElement> Result =
1562 getEdgeValue(Val: V, BBFrom: FromBB, BBTo: ToBB, CxtI);
1563 while (!Result) {
1564 // As the worklist only explicitly tracks block values (but not edge values)
1565 // we may have to call solve() multiple times, as the edge value calculation
1566 // may request additional block values.
1567 solve();
1568 Result = getEdgeValue(Val: V, BBFrom: FromBB, BBTo: ToBB, CxtI);
1569 }
1570
1571 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1572 return *Result;
1573}
1574
/// Public entry point: lattice value of the value used by \p U, refined by
/// select/phi conditions along a single-use chain starting at U.
ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
  Value *V = U.get();
  auto *CxtI = cast<Instruction>(Val: U.getUser());
  ValueLatticeElement VL = getValueInBlock(V, BB: CxtI->getParent(), CxtI);

  // Check whether the only (possibly transitive) use of the value is in a
  // position where V can be constrained by a select or branch condition.
  const Use *CurrU = &U;
  // TODO: Increase limit?
  const unsigned MaxUsesToInspect = 3;
  for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
    std::optional<ValueLatticeElement> CondVal;
    auto *CurrI = cast<Instruction>(Val: CurrU->getUser());
    if (auto *SI = dyn_cast<SelectInst>(Val: CurrI)) {
      // If the value is undef, a different value may be chosen in
      // the select condition and at use.
      if (!isGuaranteedNotToBeUndef(V: SI->getCondition(), AC))
        break;
      // Operand 1 is the true arm (condition held), operand 2 the false arm.
      if (CurrU->getOperandNo() == 1)
        CondVal =
            *getValueFromCondition(Val: V, Cond: SI->getCondition(), /*IsTrueDest*/ true,
                                   /*UseBlockValue*/ false);
      else if (CurrU->getOperandNo() == 2)
        CondVal =
            *getValueFromCondition(Val: V, Cond: SI->getCondition(), /*IsTrueDest*/ false,
                                   /*UseBlockValue*/ false);
    } else if (auto *PHI = dyn_cast<PHINode>(Val: CurrI)) {
      // TODO: Use non-local query?
      CondVal = *getEdgeValueLocal(Val: V, BBFrom: PHI->getIncomingBlock(U: *CurrU),
                                   BBTo: PHI->getParent(), /*UseBlockValue*/ false);
    }
    if (CondVal)
      VL = intersect(A: VL, B: *CondVal);

    // Only follow one-use chain, to allow direct intersection of conditions.
    // If there are multiple uses, we would have to intersect with the union of
    // all conditions at different uses.
    // Stop walking if we hit a non-speculatable instruction. Even if the
    // result is only used under a specific condition, executing the
    // instruction itself may cause side effects or UB already.
    // This also disallows looking through phi nodes: If the phi node is part
    // of a cycle, we might end up reasoning about values from different cycle
    // iterations (PR60629).
    if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(I: CurrI))
      break;
    CurrU = &*CurrI->use_begin();
  }
  return VL;
}
1624
// Inform the analysis that edge PredBB->OldSucc was threaded to NewSucc.
// Note: PredBB is unused here; the cache invalidation performed by
// threadEdgeImpl only depends on the old and new successor blocks.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}
1629
1630//===----------------------------------------------------------------------===//
1631// LazyValueInfo Impl
1632//===----------------------------------------------------------------------===//
1633
bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  // Drop any state cached for a previously analyzed function.
  if (auto *Impl = Info.getImpl())
    Impl->clear();

  // Fully lazy.
  return false;
}
1643
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  // Pure analysis pass: it transforms nothing, so everything is preserved.
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}
1649
// Accessor for the wrapped (lazily populated) LazyValueInfo result object.
LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1651
1652/// This lazily constructs the LazyValueInfoImpl.
1653LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1654 if (!PImpl) {
1655 assert(M && "getCache() called with a null Module");
1656 const DataLayout &DL = M->getDataLayout();
1657 Function *GuardDecl =
1658 M->getFunction(Name: Intrinsic::getName(Intrinsic::experimental_guard));
1659 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1660 }
1661 return *static_cast<LazyValueInfoImpl *>(PImpl);
1662}
1663
1664LazyValueInfoImpl *LazyValueInfo::getImpl() {
1665 if (!PImpl)
1666 return nullptr;
1667 return static_cast<LazyValueInfoImpl *>(PImpl);
1668}
1669
// Destruction frees the underlying implementation and its cache, if any.
LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1671
1672void LazyValueInfo::releaseMemory() {
1673 // If the cache was allocated, free it.
1674 if (auto *Impl = getImpl()) {
1675 delete &*Impl;
1676 PImpl = nullptr;
1677 }
1678}
1679
1680bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1681 FunctionAnalysisManager::Invalidator &Inv) {
1682 // We need to invalidate if we have either failed to preserve this analyses
1683 // result directly or if any of its dependencies have been invalidated.
1684 auto PAC = PA.getChecker<LazyValueAnalysis>();
1685 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1686 return true;
1687
1688 return false;
1689}
1690
// Legacy-PM hook: drop all cached lattice values between pass runs.
void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1692
LazyValueInfo LazyValueAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(IR&: F);

  // The returned result is fully lazy: lattice values are computed on demand.
  return LazyValueInfo(&AC, &F.getParent()->getDataLayout());
}
1699
1700/// Returns true if we can statically tell that this value will never be a
1701/// "useful" constant. In practice, this means we've got something like an
1702/// alloca or a malloc call for which a comparison against a constant can
1703/// only be guarding dead code. Note that we are potentially giving up some
1704/// precision in dead code (a constant result) in favour of avoiding a
1705/// expensive search for a easily answered common query.
1706static bool isKnownNonConstant(Value *V) {
1707 V = V->stripPointerCasts();
1708 // The return val of alloc cannot be a Constant.
1709 if (isa<AllocaInst>(Val: V))
1710 return true;
1711 return false;
1712}
1713
1714Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1715 // Bail out early if V is known not to be a Constant.
1716 if (isKnownNonConstant(V))
1717 return nullptr;
1718
1719 BasicBlock *BB = CxtI->getParent();
1720 ValueLatticeElement Result =
1721 getOrCreateImpl(M: BB->getModule()).getValueInBlock(V, BB, CxtI);
1722
1723 if (Result.isConstant())
1724 return Result.getConstant();
1725 if (Result.isConstantRange()) {
1726 const ConstantRange &CR = Result.getConstantRange();
1727 if (const APInt *SingleVal = CR.getSingleElement())
1728 return ConstantInt::get(Context&: V->getContext(), V: *SingleVal);
1729 }
1730 return nullptr;
1731}
1732
// Return the best-known range for integer value V at CxtI. UndefAllowed
// controls whether an undef lattice value may be treated as a full range.
ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
                                              bool UndefAllowed) {
  assert(V->getType()->isIntegerTy());
  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getOrCreateImpl(M: BB->getModule()).getValueInBlock(V, BB, CxtI);
  // Convert the lattice value into a range over V's integer type.
  return toConstantRange(Val: Result, Ty: V->getType(), UndefAllowed);
}
1741
// Like getConstantRange, but computes the range at a specific use, which
// may be tighter thanks to select/phi conditions dominating that use.
ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
                                                   bool UndefAllowed) {
  auto *Inst = cast<Instruction>(Val: U.getUser());
  ValueLatticeElement Result =
      getOrCreateImpl(M: Inst->getModule()).getValueAtUse(U);
  return toConstantRange(Val: Result, Ty: U->getType(), UndefAllowed);
}
1749
1750/// Determine whether the specified value is known to be a
1751/// constant on the specified edge. Return null if not.
1752Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1753 BasicBlock *ToBB,
1754 Instruction *CxtI) {
1755 Module *M = FromBB->getModule();
1756 ValueLatticeElement Result =
1757 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1758
1759 if (Result.isConstant())
1760 return Result.getConstant();
1761 if (Result.isConstantRange()) {
1762 const ConstantRange &CR = Result.getConstantRange();
1763 if (const APInt *SingleVal = CR.getSingleElement())
1764 return ConstantInt::get(Context&: V->getContext(), V: *SingleVal);
1765 }
1766 return nullptr;
1767}
1768
// Return the best-known range for V along the edge FromBB->ToBB.
ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
                                                    BasicBlock *FromBB,
                                                    BasicBlock *ToBB,
                                                    Instruction *CxtI) {
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
  // TODO: Should undef be allowed here?
  return toConstantRange(Val: Result, Ty: V->getType(), /*UndefAllowed*/ true);
}
1779
// Evaluate `Val Pred C` given Val's lattice state, returning True/False when
// the comparison is decided for every value Val may take, Unknown otherwise.
static LazyValueInfo::Tristate
getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
                   const DataLayout &DL) {
  // If we know the value is a constant, evaluate the conditional.
  Constant *Res = nullptr;
  if (Val.isConstant()) {
    Res = ConstantFoldCompareInstOperands(Predicate: Pred, LHS: Val.getConstant(), RHS: C, DL);
    if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Val: Res))
      return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
    // Folding may fail (or not produce an i1 constant), e.g. for vector or
    // non-foldable pointer comparisons.
    return LazyValueInfo::Unknown;
  }

  if (Val.isConstantRange()) {
    // Range reasoning only applies to integer constants.
    ConstantInt *CI = dyn_cast<ConstantInt>(Val: C);
    if (!CI) return LazyValueInfo::Unknown;

    const ConstantRange &CR = Val.getConstantRange();
    if (Pred == ICmpInst::ICMP_EQ) {
      // EQ is false if C lies outside the range; true only if the range is
      // exactly {C}.
      if (!CR.contains(Val: CI->getValue()))
        return LazyValueInfo::False;

      if (CR.isSingleElement())
        return LazyValueInfo::True;
    } else if (Pred == ICmpInst::ICMP_NE) {
      // Mirror image of the EQ case.
      if (!CR.contains(Val: CI->getValue()))
        return LazyValueInfo::True;

      if (CR.isSingleElement())
        return LazyValueInfo::False;
    } else {
      // Handle more complex predicates: build the exact set of LHS values
      // satisfying `x Pred C` and test whether CR is entirely inside it
      // (always true) or entirely inside its complement (always false).
      ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
          Pred: (ICmpInst::Predicate)Pred, Other: CI->getValue());
      if (TrueValues.contains(CR))
        return LazyValueInfo::True;
      if (TrueValues.inverse().contains(CR))
        return LazyValueInfo::False;
    }
    return LazyValueInfo::Unknown;
  }

  if (Val.isNotConstant()) {
    // If this is an equality comparison, we can try to fold it knowing that
    // "V != C1".
    if (Pred == ICmpInst::ICMP_EQ) {
      // !C1 == C -> false iff C1 == C.
      Res = ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_NE,
                                            LHS: Val.getNotConstant(), RHS: C, DL);
      if (Res && Res->isNullValue())
        return LazyValueInfo::False;
    } else if (Pred == ICmpInst::ICMP_NE) {
      // !C1 != C -> true iff C1 == C.
      Res = ConstantFoldCompareInstOperands(Predicate: ICmpInst::ICMP_NE,
                                            LHS: Val.getNotConstant(), RHS: C, DL);
      if (Res && Res->isNullValue())
        return LazyValueInfo::True;
    }
    return LazyValueInfo::Unknown;
  }

  return LazyValueInfo::Unknown;
}
1842
/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
                                  BasicBlock *FromBB, BasicBlock *ToBB,
                                  Instruction *CxtI) {
  Module *M = FromBB->getModule();
  // Compute V's lattice value along the edge, then fold the predicate
  // against it.
  ValueLatticeElement Result =
      getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  return getPredicateResult(Pred, C, Val: Result, DL: M->getDataLayout());
}
1855
// Evaluate `V Pred C` at CxtI. Tries a cheap isKnownNonZero fast path, then
// the lattice value, and finally re-asks the question separately along each
// incoming edge of CxtI's block (useful when the merged lattice value is too
// coarse but each input decides the predicate the same way).
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
                              Instruction *CxtI, bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V: V->stripPointerCastsSameRepresentation(), DL)) {
    if (Pred == ICmpInst::ICMP_EQ)
      return LazyValueInfo::False;
    else if (Pred == ICmpInst::ICMP_NE)
      return LazyValueInfo::True;
  }

  auto &Impl = getOrCreateImpl(M);
  ValueLatticeElement Result =
      UseBlockValue ? Impl.getValueInBlock(V, BB: CxtI->getParent(), CxtI)
                    : Impl.getValueAt(V, CxtI);
  Tristate Ret = getPredicateResult(Pred, C, Val: Result, DL);
  if (Ret != Unknown)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location. In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input. As a motivating example, consider:
  // bb1:
  //   %v1 = ... ; constantrange<1, 5>
  //   br label %merge
  // bb2:
  //   %v2 = ... ; constantrange<10, 20>
  //   br label %merge
  // merge:
  //   %phi = phi [%v1, %v2] ; constantrange<1,20>
  //   %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block. Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return Unknown;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(Val: V))
    if (PHI->getParent() == BB) {
      Tristate Baseline = Unknown;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Tristate Result =
            getPredicateOnEdge(Pred, V: Incoming, C, FromBB: PredBB, ToBB: BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : Unknown); /* All others */
        if (Baseline == Unknown)
          break;
      }
      if (Baseline != Unknown)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(Val: V) || cast<Instruction>(Val: V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Tristate Baseline = getPredicateOnEdge(Pred, V, C, FromBB: *PI, ToBB: BB, CxtI);
    if (Baseline != Unknown) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Tristate Ret = getPredicateOnEdge(Pred, V, C, FromBB: *PI, ToBB: BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return Unknown;
}
1961
1962LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1963 Value *RHS,
1964 Instruction *CxtI,
1965 bool UseBlockValue) {
1966 CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1967
1968 if (auto *C = dyn_cast<Constant>(Val: RHS))
1969 return getPredicateAt(Pred: P, V: LHS, C, CxtI, UseBlockValue);
1970 if (auto *C = dyn_cast<Constant>(Val: LHS))
1971 return getPredicateAt(Pred: CmpInst::getSwappedPredicate(pred: Pred), V: RHS, C, CxtI,
1972 UseBlockValue);
1973
1974 // Got two non-Constant values. Try to determine the comparison results based
1975 // on the block values of the two operands, e.g. because they have
1976 // non-overlapping ranges.
1977 if (UseBlockValue) {
1978 Module *M = CxtI->getModule();
1979 ValueLatticeElement L =
1980 getOrCreateImpl(M).getValueInBlock(V: LHS, BB: CxtI->getParent(), CxtI);
1981 if (L.isOverdefined())
1982 return LazyValueInfo::Unknown;
1983
1984 ValueLatticeElement R =
1985 getOrCreateImpl(M).getValueInBlock(V: RHS, BB: CxtI->getParent(), CxtI);
1986 Type *Ty = CmpInst::makeCmpResultType(opnd_type: LHS->getType());
1987 if (Constant *Res = L.getCompare(Pred: (CmpInst::Predicate)P, Ty, Other: R,
1988 DL: M->getDataLayout())) {
1989 if (Res->isNullValue())
1990 return LazyValueInfo::False;
1991 if (Res->isOneValue())
1992 return LazyValueInfo::True;
1993 }
1994 }
1995 return LazyValueInfo::Unknown;
1996}
1997
// Forward edge-threading notification to the implementation, if it exists.
// A null impl means nothing has been cached yet, so there is nothing to do.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (auto *Impl = getImpl())
    Impl->threadEdge(PredBB, OldSucc, NewSucc);
}
2003
// Invalidate any cached information about V (no-op if nothing is cached).
void LazyValueInfo::forgetValue(Value *V) {
  if (auto *Impl = getImpl())
    Impl->forgetValue(V);
}
2008
// Invalidate cached information for a block about to be removed from the CFG.
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (auto *Impl = getImpl())
    Impl->eraseBlock(BB);
}
2013
// Drop all cached lattice values but keep the implementation object alive.
void LazyValueInfo::clear() {
  if (auto *Impl = getImpl())
    Impl->clear();
}
2018
// Print the LVI analysis for function F to OS (debugging aid). Does nothing
// if the implementation has not been constructed yet.
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (auto *Impl = getImpl())
    Impl->printLVI(F, DTree, OS);
}
2023
2024// Print the LVI for the function arguments at the start of each basic block.
2025void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2026 const BasicBlock *BB, formatted_raw_ostream &OS) {
2027 // Find if there are latticevalues defined for arguments of the function.
2028 auto *F = BB->getParent();
2029 for (const auto &Arg : F->args()) {
2030 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2031 V: const_cast<Argument *>(&Arg), BB: const_cast<BasicBlock *>(BB));
2032 if (Result.isUnknown())
2033 continue;
2034 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2035 }
2036}
2037
// This function prints the LVI analysis for the instruction I at the beginning
// of various basic blocks. It relies on calculated values that are stored in
// the LazyValueInfoCache, and in the absence of cached values, recalculate the
// LazyValueInfo for `I`, and print that info.
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {

  auto *ParentBB = I->getParent();
  // Tracks blocks already printed so each block is reported at most once.
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
  // We can generate (solve) LVI values only for blocks that are dominated by
  // the I's parent. However, to avoid generating LVI for all dominating blocks,
  // that contain redundant/uninteresting information, we print LVI for
  // blocks that may use this LVI information (such as immediate successor
  // blocks, and blocks that contain uses of `I`).
  auto printResult = [&](const BasicBlock *BB) {
    // Deduplicate: skip blocks we have already printed.
    if (!BlocksContainingLVI.insert(Ptr: BB).second)
      return;
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        V: const_cast<Instruction *>(I), BB: const_cast<BasicBlock *>(BB));
    OS << "; LatticeVal for: '" << *I << "' in BB: '";
    BB->printAsOperand(O&: OS, PrintType: false);
    OS << "' is: " << Result << "\n";
  };

  printResult(ParentBB);
  // Print the LVI analysis results for the immediate successor blocks, that
  // are dominated by `ParentBB`.
  for (const auto *BBSucc : successors(BB: ParentBB))
    if (DT.dominates(A: ParentBB, B: BBSucc))
      printResult(BBSucc);

  // Print LVI in blocks where `I` is used. Phi users are only printed when
  // their block is dominated by ParentBB, since LVI can only be solved for
  // dominated blocks.
  for (const auto *U : I->users())
    if (auto *UseI = dyn_cast<Instruction>(Val: U))
      if (!isa<PHINode>(Val: UseI) || DT.dominates(A: ParentBB, B: UseI->getParent()))
        printResult(UseI->getParent());

}
2076
// New-PM printer pass: dumps the LVI lattice values for F. Printing only
// reads analysis state, so all analyses are preserved.
PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
                                                FunctionAnalysisManager &AM) {
  OS << "LVI for function '" << F.getName() << "':\n";
  auto &LVI = AM.getResult<LazyValueAnalysis>(IR&: F);
  auto &DTree = AM.getResult<DominatorTreeAnalysis>(IR&: F);
  LVI.printLVI(F, DTree, OS);
  return PreservedAnalyses::all();
}
2085

source code of llvm/lib/Analysis/LazyValueInfo.cpp