1//===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file implements the new LLVM's Global Value Numbering pass.
11/// GVN partitions values computed by a function into congruence classes.
12/// Values ending up in the same congruence class are guaranteed to be the same
13/// for every execution of the program. In that respect, congruency is a
14/// compile-time approximation of equivalence of values at runtime.
15/// The algorithm implemented here uses a sparse formulation and it's based
16/// on the ideas described in the paper:
17/// "A Sparse Algorithm for Predicated Global Value Numbering" from
18/// Karthik Gargi.
19///
20/// A brief overview of the algorithm: The algorithm is essentially the same as
21/// the standard RPO value numbering algorithm (a good reference is the paper
22/// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
23/// The RPO algorithm proceeds, on every iteration, to process every reachable
24/// block and every instruction in that block. This is because the standard RPO
25/// algorithm does not track what things have the same value number, it only
26/// tracks what the value number of a given operation is (the mapping is
27/// operation -> value number). Thus, when a value number of an operation
28/// changes, it must reprocess everything to ensure all uses of a value number
29/// get updated properly. In constrast, the sparse algorithm we use *also*
30/// tracks what operations have a given value number (IE it also tracks the
31/// reverse mapping from value number -> operations with that value number), so
32/// that it only needs to reprocess the instructions that are affected when
33/// something's value number changes. The vast majority of complexity and code
34/// in this file is devoted to tracking what value numbers could change for what
35/// instructions when various things happen. The rest of the algorithm is
36/// devoted to performing symbolic evaluation, forward propagation, and
37/// simplification of operations based on the value numbers deduced so far
38///
39/// In order to make the GVN mostly-complete, we use a technique derived from
40/// "Detection of Redundant Expressions: A Complete and Polynomial-time
41/// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
42/// based GVN algorithms is related to their inability to detect equivalence
43/// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
44/// We resolve this issue by generating the equivalent "phi of ops" form for
45/// each op of phis we see, in a way that only takes polynomial time to resolve.
46///
47/// We also do not perform elimination by using any published algorithm. All
48/// published algorithms are O(Instructions). Instead, we use a technique that
49/// is O(number of operations with the same value number), enabling us to skip
50/// trying to eliminate things that have unique value numbers.
51//
52//===----------------------------------------------------------------------===//
53
54#include "llvm/Transforms/Scalar/NewGVN.h"
55#include "llvm/ADT/ArrayRef.h"
56#include "llvm/ADT/BitVector.h"
57#include "llvm/ADT/DenseMap.h"
58#include "llvm/ADT/DenseMapInfo.h"
59#include "llvm/ADT/DenseSet.h"
60#include "llvm/ADT/DepthFirstIterator.h"
61#include "llvm/ADT/GraphTraits.h"
62#include "llvm/ADT/Hashing.h"
63#include "llvm/ADT/PointerIntPair.h"
64#include "llvm/ADT/PostOrderIterator.h"
65#include "llvm/ADT/SetOperations.h"
66#include "llvm/ADT/SmallPtrSet.h"
67#include "llvm/ADT/SmallVector.h"
68#include "llvm/ADT/SparseBitVector.h"
69#include "llvm/ADT/Statistic.h"
70#include "llvm/ADT/iterator_range.h"
71#include "llvm/Analysis/AliasAnalysis.h"
72#include "llvm/Analysis/AssumptionCache.h"
73#include "llvm/Analysis/CFGPrinter.h"
74#include "llvm/Analysis/ConstantFolding.h"
75#include "llvm/Analysis/GlobalsModRef.h"
76#include "llvm/Analysis/InstructionSimplify.h"
77#include "llvm/Analysis/MemoryBuiltins.h"
78#include "llvm/Analysis/MemorySSA.h"
79#include "llvm/Analysis/TargetLibraryInfo.h"
80#include "llvm/Analysis/ValueTracking.h"
81#include "llvm/IR/Argument.h"
82#include "llvm/IR/BasicBlock.h"
83#include "llvm/IR/Constant.h"
84#include "llvm/IR/Constants.h"
85#include "llvm/IR/Dominators.h"
86#include "llvm/IR/Function.h"
87#include "llvm/IR/InstrTypes.h"
88#include "llvm/IR/Instruction.h"
89#include "llvm/IR/Instructions.h"
90#include "llvm/IR/IntrinsicInst.h"
91#include "llvm/IR/PatternMatch.h"
92#include "llvm/IR/Type.h"
93#include "llvm/IR/Use.h"
94#include "llvm/IR/User.h"
95#include "llvm/IR/Value.h"
96#include "llvm/Support/Allocator.h"
97#include "llvm/Support/ArrayRecycler.h"
98#include "llvm/Support/Casting.h"
99#include "llvm/Support/CommandLine.h"
100#include "llvm/Support/Debug.h"
101#include "llvm/Support/DebugCounter.h"
102#include "llvm/Support/ErrorHandling.h"
103#include "llvm/Support/PointerLikeTypeTraits.h"
104#include "llvm/Support/raw_ostream.h"
105#include "llvm/Transforms/Scalar/GVNExpression.h"
106#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
107#include "llvm/Transforms/Utils/Local.h"
108#include "llvm/Transforms/Utils/PredicateInfo.h"
109#include "llvm/Transforms/Utils/VNCoercion.h"
110#include <algorithm>
111#include <cassert>
112#include <cstdint>
113#include <iterator>
114#include <map>
115#include <memory>
116#include <set>
117#include <string>
118#include <tuple>
119#include <utility>
120#include <vector>
121
122using namespace llvm;
123using namespace llvm::GVNExpression;
124using namespace llvm::VNCoercion;
125using namespace llvm::PatternMatch;
126
127#define DEBUG_TYPE "newgvn"
128
129STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
130STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
131STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
132STATISTIC(NumGVNPhisAllSame, "Number of PHIs whos arguments are all the same");
133STATISTIC(NumGVNMaxIterations,
134 "Maximum Number of iterations it took to converge GVN");
135STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
136STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
137STATISTIC(NumGVNAvoidedSortedLeaderChanges,
138 "Number of avoided sorted leader changes");
139STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
140STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
141STATISTIC(NumGVNPHIOfOpsEliminations,
142 "Number of things eliminated using PHI of ops");
143DEBUG_COUNTER(VNCounter, "newgvn-vn",
144 "Controls which instructions are value numbered");
145DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
146 "Controls which instructions we create phi of ops for");
147// Currently store defining access refinement is too slow due to basicaa being
148// egregiously slow. This flag lets us keep it working while we work on this
149// issue.
150static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
151 cl::init(Val: false), cl::Hidden);
152
153/// Currently, the generation "phi of ops" can result in correctness issues.
154static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(Val: true),
155 cl::Hidden);
156
157//===----------------------------------------------------------------------===//
158// GVN Pass
159//===----------------------------------------------------------------------===//
160
161// Anchor methods.
162namespace llvm {
163namespace GVNExpression {
164
165Expression::~Expression() = default;
166BasicExpression::~BasicExpression() = default;
167CallExpression::~CallExpression() = default;
168LoadExpression::~LoadExpression() = default;
169StoreExpression::~StoreExpression() = default;
170AggregateValueExpression::~AggregateValueExpression() = default;
171PHIExpression::~PHIExpression() = default;
172
173} // end namespace GVNExpression
174} // end namespace llvm
175
176namespace {
177
178// Tarjan's SCC finding algorithm with Nuutila's improvements
179// SCCIterator is actually fairly complex for the simple thing we want.
180// It also wants to hand us SCC's that are unrelated to the phi node we ask
181// about, and have us process them there or risk redoing work.
182// Graph traits over a filter iterator also doesn't work that well here.
183// This SCC finder is specialized to walk use-def chains, and only follows
184// instructions,
185// not generic values (arguments, etc).
186struct TarjanSCC {
187 TarjanSCC() : Components(1) {}
188
189 void Start(const Instruction *Start) {
190 if (Root.lookup(Val: Start) == 0)
191 FindSCC(I: Start);
192 }
193
194 const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
195 unsigned ComponentID = ValueToComponent.lookup(Val: V);
196
197 assert(ComponentID > 0 &&
198 "Asking for a component for a value we never processed");
199 return Components[ComponentID];
200 }
201
202private:
203 void FindSCC(const Instruction *I) {
204 Root[I] = ++DFSNum;
205 // Store the DFS Number we had before it possibly gets incremented.
206 unsigned int OurDFS = DFSNum;
207 for (const auto &Op : I->operands()) {
208 if (auto *InstOp = dyn_cast<Instruction>(Val: Op)) {
209 if (Root.lookup(Val: Op) == 0)
210 FindSCC(I: InstOp);
211 if (!InComponent.count(Ptr: Op))
212 Root[I] = std::min(a: Root.lookup(Val: I), b: Root.lookup(Val: Op));
213 }
214 }
215 // See if we really were the root of a component, by seeing if we still have
216 // our DFSNumber. If we do, we are the root of the component, and we have
217 // completed a component. If we do not, we are not the root of a component,
218 // and belong on the component stack.
219 if (Root.lookup(Val: I) == OurDFS) {
220 unsigned ComponentID = Components.size();
221 Components.resize(N: Components.size() + 1);
222 auto &Component = Components.back();
223 Component.insert(Ptr: I);
224 LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
225 InComponent.insert(Ptr: I);
226 ValueToComponent[I] = ComponentID;
227 // Pop a component off the stack and label it.
228 while (!Stack.empty() && Root.lookup(Val: Stack.back()) >= OurDFS) {
229 auto *Member = Stack.back();
230 LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
231 Component.insert(Ptr: Member);
232 InComponent.insert(Ptr: Member);
233 ValueToComponent[Member] = ComponentID;
234 Stack.pop_back();
235 }
236 } else {
237 // Part of a component, push to stack
238 Stack.push_back(Elt: I);
239 }
240 }
241
242 unsigned int DFSNum = 1;
243 SmallPtrSet<const Value *, 8> InComponent;
244 DenseMap<const Value *, unsigned int> Root;
245 SmallVector<const Value *, 8> Stack;
246
247 // Store the components as vector of ptr sets, because we need the topo order
248 // of SCC's, but not individual member order
249 SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;
250
251 DenseMap<const Value *, unsigned> ValueToComponent;
252};
253
254// Congruence classes represent the set of expressions/instructions
255// that are all the same *during some scope in the function*.
256// That is, because of the way we perform equality propagation, and
257// because of memory value numbering, it is not correct to assume
258// you can willy-nilly replace any member with any other at any
259// point in the function.
260//
261// For any Value in the Member set, it is valid to replace any dominated member
262// with that Value.
263//
264// Every congruence class has a leader, and the leader is used to symbolize
265// instructions in a canonical way (IE every operand of an instruction that is a
266// member of the same congruence class will always be replaced with leader
267// during symbolization). To simplify symbolization, we keep the leader as a
268// constant if class can be proved to be a constant value. Otherwise, the
269// leader is the member of the value set with the smallest DFS number. Each
270// congruence class also has a defining expression, though the expression may be
271// null. If it exists, it can be used for forward propagation and reassociation
272// of values.
273
274// For memory, we also track a representative MemoryAccess, and a set of memory
275// members for MemoryPhis (which have no real instructions). Note that for
276// memory, it seems tempting to try to split the memory members into a
277// MemoryCongruenceClass or something. Unfortunately, this does not work
278// easily. The value numbering of a given memory expression depends on the
279// leader of the memory congruence class, and the leader of memory congruence
280// class depends on the value numbering of a given memory expression. This
281// leads to wasted propagation, and in some cases, missed optimization. For
282// example: If we had value numbered two stores together before, but now do not,
283// we move them to a new value congruence class. This in turn will move at one
284// of the memorydefs to a new memory congruence class. Which in turn, affects
285// the value numbering of the stores we just value numbered (because the memory
286// congruence class is part of the value number). So while theoretically
287// possible to split them up, it turns out to be *incredibly* complicated to get
288// it to work right, because of the interdependency. While structurally
289// slightly messier, it is algorithmically much simpler and faster to do what we
290// do here, and track them both at once in the same class.
291// Note: The default iterators for this class iterate over values
292class CongruenceClass {
293public:
294 using MemberType = Value;
295 using MemberSet = SmallPtrSet<MemberType *, 4>;
296 using MemoryMemberType = MemoryPhi;
297 using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;
298
299 explicit CongruenceClass(unsigned ID) : ID(ID) {}
300 CongruenceClass(unsigned ID, Value *Leader, const Expression *E)
301 : ID(ID), RepLeader(Leader), DefiningExpr(E) {}
302
303 unsigned getID() const { return ID; }
304
305 // True if this class has no members left. This is mainly used for assertion
306 // purposes, and for skipping empty classes.
307 bool isDead() const {
308 // If it's both dead from a value perspective, and dead from a memory
309 // perspective, it's really dead.
310 return empty() && memory_empty();
311 }
312
313 // Leader functions
314 Value *getLeader() const { return RepLeader; }
315 void setLeader(Value *Leader) { RepLeader = Leader; }
316 const std::pair<Value *, unsigned int> &getNextLeader() const {
317 return NextLeader;
318 }
319 void resetNextLeader() { NextLeader = {nullptr, ~0}; }
320 void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) {
321 if (LeaderPair.second < NextLeader.second)
322 NextLeader = LeaderPair;
323 }
324
325 Value *getStoredValue() const { return RepStoredValue; }
326 void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
327 const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
328 void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }
329
330 // Forward propagation info
331 const Expression *getDefiningExpr() const { return DefiningExpr; }
332
333 // Value member set
334 bool empty() const { return Members.empty(); }
335 unsigned size() const { return Members.size(); }
336 MemberSet::const_iterator begin() const { return Members.begin(); }
337 MemberSet::const_iterator end() const { return Members.end(); }
338 void insert(MemberType *M) { Members.insert(Ptr: M); }
339 void erase(MemberType *M) { Members.erase(Ptr: M); }
340 void swap(MemberSet &Other) { Members.swap(RHS&: Other); }
341
342 // Memory member set
343 bool memory_empty() const { return MemoryMembers.empty(); }
344 unsigned memory_size() const { return MemoryMembers.size(); }
345 MemoryMemberSet::const_iterator memory_begin() const {
346 return MemoryMembers.begin();
347 }
348 MemoryMemberSet::const_iterator memory_end() const {
349 return MemoryMembers.end();
350 }
351 iterator_range<MemoryMemberSet::const_iterator> memory() const {
352 return make_range(x: memory_begin(), y: memory_end());
353 }
354
355 void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(Ptr: M); }
356 void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(Ptr: M); }
357
358 // Store count
359 unsigned getStoreCount() const { return StoreCount; }
360 void incStoreCount() { ++StoreCount; }
361 void decStoreCount() {
362 assert(StoreCount != 0 && "Store count went negative");
363 --StoreCount;
364 }
365
366 // True if this class has no memory members.
367 bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }
368
369 // Return true if two congruence classes are equivalent to each other. This
370 // means that every field but the ID number and the dead field are equivalent.
371 bool isEquivalentTo(const CongruenceClass *Other) const {
372 if (!Other)
373 return false;
374 if (this == Other)
375 return true;
376
377 if (std::tie(args: StoreCount, args: RepLeader, args: RepStoredValue, args: RepMemoryAccess) !=
378 std::tie(args: Other->StoreCount, args: Other->RepLeader, args: Other->RepStoredValue,
379 args: Other->RepMemoryAccess))
380 return false;
381 if (DefiningExpr != Other->DefiningExpr)
382 if (!DefiningExpr || !Other->DefiningExpr ||
383 *DefiningExpr != *Other->DefiningExpr)
384 return false;
385
386 if (Members.size() != Other->Members.size())
387 return false;
388
389 return llvm::set_is_subset(S1: Members, S2: Other->Members);
390 }
391
392private:
393 unsigned ID;
394
395 // Representative leader.
396 Value *RepLeader = nullptr;
397
398 // The most dominating leader after our current leader, because the member set
399 // is not sorted and is expensive to keep sorted all the time.
400 std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};
401
402 // If this is represented by a store, the value of the store.
403 Value *RepStoredValue = nullptr;
404
405 // If this class contains MemoryDefs or MemoryPhis, this is the leading memory
406 // access.
407 const MemoryAccess *RepMemoryAccess = nullptr;
408
409 // Defining Expression.
410 const Expression *DefiningExpr = nullptr;
411
412 // Actual members of this class.
413 MemberSet Members;
414
415 // This is the set of MemoryPhis that exist in the class. MemoryDefs and
416 // MemoryUses have real instructions representing them, so we only need to
417 // track MemoryPhis here.
418 MemoryMemberSet MemoryMembers;
419
420 // Number of stores in this congruence class.
421 // This is used so we can detect store equivalence changes properly.
422 int StoreCount = 0;
423};
424
425} // end anonymous namespace
426
427namespace llvm {
428
429struct ExactEqualsExpression {
430 const Expression &E;
431
432 explicit ExactEqualsExpression(const Expression &E) : E(E) {}
433
434 hash_code getComputedHash() const { return E.getComputedHash(); }
435
436 bool operator==(const Expression &Other) const {
437 return E.exactlyEquals(Other);
438 }
439};
440
441template <> struct DenseMapInfo<const Expression *> {
442 static const Expression *getEmptyKey() {
443 auto Val = static_cast<uintptr_t>(-1);
444 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
445 return reinterpret_cast<const Expression *>(Val);
446 }
447
448 static const Expression *getTombstoneKey() {
449 auto Val = static_cast<uintptr_t>(~1U);
450 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
451 return reinterpret_cast<const Expression *>(Val);
452 }
453
454 static unsigned getHashValue(const Expression *E) {
455 return E->getComputedHash();
456 }
457
458 static unsigned getHashValue(const ExactEqualsExpression &E) {
459 return E.getComputedHash();
460 }
461
462 static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
463 if (RHS == getTombstoneKey() || RHS == getEmptyKey())
464 return false;
465 return LHS == *RHS;
466 }
467
468 static bool isEqual(const Expression *LHS, const Expression *RHS) {
469 if (LHS == RHS)
470 return true;
471 if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
472 LHS == getEmptyKey() || RHS == getEmptyKey())
473 return false;
474 // Compare hashes before equality. This is *not* what the hashtable does,
475 // since it is computing it modulo the number of buckets, whereas we are
476 // using the full hash keyspace. Since the hashes are precomputed, this
477 // check is *much* faster than equality.
478 if (LHS->getComputedHash() != RHS->getComputedHash())
479 return false;
480 return *LHS == *RHS;
481 }
482};
483
484} // end namespace llvm
485
486namespace {
487
488class NewGVN {
489 Function &F;
490 DominatorTree *DT = nullptr;
491 const TargetLibraryInfo *TLI = nullptr;
492 AliasAnalysis *AA = nullptr;
493 MemorySSA *MSSA = nullptr;
494 MemorySSAWalker *MSSAWalker = nullptr;
495 AssumptionCache *AC = nullptr;
496 const DataLayout &DL;
497 std::unique_ptr<PredicateInfo> PredInfo;
498
499 // These are the only two things the create* functions should have
500 // side-effects on due to allocating memory.
501 mutable BumpPtrAllocator ExpressionAllocator;
502 mutable ArrayRecycler<Value *> ArgRecycler;
503 mutable TarjanSCC SCCFinder;
504 const SimplifyQuery SQ;
505
506 // Number of function arguments, used by ranking
507 unsigned int NumFuncArgs = 0;
508
509 // RPOOrdering of basic blocks
510 DenseMap<const DomTreeNode *, unsigned> RPOOrdering;
511
512 // Congruence class info.
513
514 // This class is called INITIAL in the paper. It is the class everything
515 // startsout in, and represents any value. Being an optimistic analysis,
516 // anything in the TOP class has the value TOP, which is indeterminate and
517 // equivalent to everything.
518 CongruenceClass *TOPClass = nullptr;
519 std::vector<CongruenceClass *> CongruenceClasses;
520 unsigned NextCongruenceNum = 0;
521
522 // Value Mappings.
523 DenseMap<Value *, CongruenceClass *> ValueToClass;
524 DenseMap<Value *, const Expression *> ValueToExpression;
525
526 // Value PHI handling, used to make equivalence between phi(op, op) and
527 // op(phi, phi).
528 // These mappings just store various data that would normally be part of the
529 // IR.
530 SmallPtrSet<const Instruction *, 8> PHINodeUses;
531
532 DenseMap<const Value *, bool> OpSafeForPHIOfOps;
533
534 // Map a temporary instruction we created to a parent block.
535 DenseMap<const Value *, BasicBlock *> TempToBlock;
536
537 // Map between the already in-program instructions and the temporary phis we
538 // created that they are known equivalent to.
539 DenseMap<const Value *, PHINode *> RealToTemp;
540
541 // In order to know when we should re-process instructions that have
542 // phi-of-ops, we track the set of expressions that they needed as
543 // leaders. When we discover new leaders for those expressions, we process the
544 // associated phi-of-op instructions again in case they have changed. The
545 // other way they may change is if they had leaders, and those leaders
546 // disappear. However, at the point they have leaders, there are uses of the
547 // relevant operands in the created phi node, and so they will get reprocessed
548 // through the normal user marking we perform.
549 mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
550 DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
551 ExpressionToPhiOfOps;
552
553 // Map from temporary operation to MemoryAccess.
554 DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;
555
556 // Set of all temporary instructions we created.
557 // Note: This will include instructions that were just created during value
558 // numbering. The way to test if something is using them is to check
559 // RealToTemp.
560 DenseSet<Instruction *> AllTempInstructions;
561
562 // This is the set of instructions to revisit on a reachability change. At
563 // the end of the main iteration loop it will contain at least all the phi of
564 // ops instructions that will be changed to phis, as well as regular phis.
565 // During the iteration loop, it may contain other things, such as phi of ops
566 // instructions that used edge reachability to reach a result, and so need to
567 // be revisited when the edge changes, independent of whether the phi they
568 // depended on changes.
569 DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange;
570
571 // Mapping from predicate info we used to the instructions we used it with.
572 // In order to correctly ensure propagation, we must keep track of what
573 // comparisons we used, so that when the values of the comparisons change, we
574 // propagate the information to the places we used the comparison.
575 mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
576 PredicateToUsers;
577
578 // the same reasoning as PredicateToUsers. When we skip MemoryAccesses for
579 // stores, we no longer can rely solely on the def-use chains of MemorySSA.
580 mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
581 MemoryToUsers;
582
583 // A table storing which memorydefs/phis represent a memory state provably
584 // equivalent to another memory state.
585 // We could use the congruence class machinery, but the MemoryAccess's are
586 // abstract memory states, so they can only ever be equivalent to each other,
587 // and not to constants, etc.
588 DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;
589
590 // We could, if we wanted, build MemoryPhiExpressions and
591 // MemoryVariableExpressions, etc, and value number them the same way we value
592 // number phi expressions. For the moment, this seems like overkill. They
593 // can only exist in one of three states: they can be TOP (equal to
594 // everything), Equivalent to something else, or unique. Because we do not
595 // create expressions for them, we need to simulate leader change not just
596 // when they change class, but when they change state. Note: We can do the
597 // same thing for phis, and avoid having phi expressions if we wanted, We
598 // should eventually unify in one direction or the other, so this is a little
599 // bit of an experiment in which turns out easier to maintain.
600 enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
601 DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;
602
603 enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
604 mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;
605
606 // Expression to class mapping.
607 using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
608 ExpressionClassMap ExpressionToClass;
609
610 // We have a single expression that represents currently DeadExpressions.
611 // For dead expressions we can prove will stay dead, we mark them with
612 // DFS number zero. However, it's possible in the case of phi nodes
613 // for us to assume/prove all arguments are dead during fixpointing.
614 // We use DeadExpression for that case.
615 DeadExpression *SingletonDeadExpression = nullptr;
616
617 // Which values have changed as a result of leader changes.
618 SmallPtrSet<Value *, 8> LeaderChanges;
619
620 // Reachability info.
621 using BlockEdge = BasicBlockEdge;
622 DenseSet<BlockEdge> ReachableEdges;
623 SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;
624
625 // This is a bitvector because, on larger functions, we may have
626 // thousands of touched instructions at once (entire blocks,
627 // instructions with hundreds of uses, etc). Even with optimization
628 // for when we mark whole blocks as touched, when this was a
629 // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
630 // the time in GVN just managing this list. The bitvector, on the
631 // other hand, efficiently supports test/set/clear of both
632 // individual and ranges, as well as "find next element" This
633 // enables us to use it as a worklist with essentially 0 cost.
634 BitVector TouchedInstructions;
635
636 DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
637 mutable DenseMap<const IntrinsicInst *, const Value *> IntrinsicInstPred;
638
639#ifndef NDEBUG
640 // Debugging for how many times each block and instruction got processed.
641 DenseMap<const Value *, unsigned> ProcessedCount;
642#endif
643
644 // DFS info.
645 // This contains a mapping from Instructions to DFS numbers.
646 // The numbering starts at 1. An instruction with DFS number zero
647 // means that the instruction is dead.
648 DenseMap<const Value *, unsigned> InstrDFS;
649
650 // This contains the mapping DFS numbers to instructions.
651 SmallVector<Value *, 32> DFSToInstr;
652
653 // Deletion info.
654 SmallPtrSet<Instruction *, 8> InstructionsToErase;
655
656public:
657 NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
658 TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
659 const DataLayout &DL)
660 : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL),
661 PredInfo(std::make_unique<PredicateInfo>(args&: F, args&: *DT, args&: *AC)),
662 SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false,
663 /*CanUseUndef=*/false) {}
664
665 bool runGVN();
666
667private:
668 /// Helper struct return a Expression with an optional extra dependency.
669 struct ExprResult {
670 const Expression *Expr;
671 Value *ExtraDep;
672 const PredicateBase *PredDep;
673
674 ExprResult(const Expression *Expr, Value *ExtraDep = nullptr,
675 const PredicateBase *PredDep = nullptr)
676 : Expr(Expr), ExtraDep(ExtraDep), PredDep(PredDep) {}
677 ExprResult(const ExprResult &) = delete;
678 ExprResult(ExprResult &&Other)
679 : Expr(Other.Expr), ExtraDep(Other.ExtraDep), PredDep(Other.PredDep) {
680 Other.Expr = nullptr;
681 Other.ExtraDep = nullptr;
682 Other.PredDep = nullptr;
683 }
684 ExprResult &operator=(const ExprResult &Other) = delete;
685 ExprResult &operator=(ExprResult &&Other) = delete;
686
687 ~ExprResult() { assert(!ExtraDep && "unhandled ExtraDep"); }
688
689 operator bool() const { return Expr; }
690
691 static ExprResult none() { return {nullptr, nullptr, nullptr}; }
692 static ExprResult some(const Expression *Expr, Value *ExtraDep = nullptr) {
693 return {Expr, ExtraDep, nullptr};
694 }
695 static ExprResult some(const Expression *Expr,
696 const PredicateBase *PredDep) {
697 return {Expr, nullptr, PredDep};
698 }
699 static ExprResult some(const Expression *Expr, Value *ExtraDep,
700 const PredicateBase *PredDep) {
701 return {Expr, ExtraDep, PredDep};
702 }
703 };
704
705 // Expression handling.
706 ExprResult createExpression(Instruction *) const;
707 const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
708 Instruction *) const;
709
710 // Our canonical form for phi arguments is a pair of incoming value, incoming
711 // basic block.
712 using ValPair = std::pair<Value *, BasicBlock *>;
713
714 PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *,
715 BasicBlock *, bool &HasBackEdge,
716 bool &OriginalOpsConstant) const;
717 const DeadExpression *createDeadExpression() const;
718 const VariableExpression *createVariableExpression(Value *) const;
719 const ConstantExpression *createConstantExpression(Constant *) const;
720 const Expression *createVariableOrConstant(Value *V) const;
721 const UnknownExpression *createUnknownExpression(Instruction *) const;
722 const StoreExpression *createStoreExpression(StoreInst *,
723 const MemoryAccess *) const;
724 LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
725 const MemoryAccess *) const;
726 const CallExpression *createCallExpression(CallInst *,
727 const MemoryAccess *) const;
728 const AggregateValueExpression *
729 createAggregateValueExpression(Instruction *) const;
730 bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;
731
732 // Congruence class handling.
733 CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
734 auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E);
735 CongruenceClasses.emplace_back(args&: result);
736 return result;
737 }
738
739 CongruenceClass *createMemoryClass(MemoryAccess *MA) {
740 auto *CC = createCongruenceClass(Leader: nullptr, E: nullptr);
741 CC->setMemoryLeader(MA);
742 return CC;
743 }
744
745 CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
746 auto *CC = getMemoryClass(MA);
747 if (CC->getMemoryLeader() != MA)
748 CC = createMemoryClass(MA);
749 return CC;
750 }
751
752 CongruenceClass *createSingletonCongruenceClass(Value *Member) {
753 CongruenceClass *CClass = createCongruenceClass(Leader: Member, E: nullptr);
754 CClass->insert(M: Member);
755 ValueToClass[Member] = CClass;
756 return CClass;
757 }
758
759 void initializeCongruenceClasses(Function &F);
760 const Expression *makePossiblePHIOfOps(Instruction *,
761 SmallPtrSetImpl<Value *> &);
762 Value *findLeaderForInst(Instruction *ValueOp,
763 SmallPtrSetImpl<Value *> &Visited,
764 MemoryAccess *MemAccess, Instruction *OrigInst,
765 BasicBlock *PredBB);
766 bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock,
767 SmallPtrSetImpl<const Value *> &);
768 void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
769 void removePhiOfOps(Instruction *I, PHINode *PHITemp);
770
771 // Value number an Instruction or MemoryPhi.
772 void valueNumberMemoryPhi(MemoryPhi *);
773 void valueNumberInstruction(Instruction *);
774
775 // Symbolic evaluation.
776 ExprResult checkExprResults(Expression *, Instruction *, Value *) const;
777 ExprResult performSymbolicEvaluation(Instruction *,
778 SmallPtrSetImpl<Value *> &) const;
779 const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
780 Instruction *,
781 MemoryAccess *) const;
782 const Expression *performSymbolicLoadEvaluation(Instruction *) const;
783 const Expression *performSymbolicStoreEvaluation(Instruction *) const;
784 ExprResult performSymbolicCallEvaluation(Instruction *) const;
785 void sortPHIOps(MutableArrayRef<ValPair> Ops) const;
786 const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>,
787 Instruction *I,
788 BasicBlock *PHIBlock) const;
789 const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
790 ExprResult performSymbolicCmpEvaluation(Instruction *) const;
791 ExprResult performSymbolicPredicateInfoEvaluation(IntrinsicInst *) const;
792
793 // Congruence finding.
794 bool someEquivalentDominates(const Instruction *, const Instruction *) const;
795 Value *lookupOperandLeader(Value *) const;
796 CongruenceClass *getClassForExpression(const Expression *E) const;
797 void performCongruenceFinding(Instruction *, const Expression *);
798 void moveValueToNewCongruenceClass(Instruction *, const Expression *,
799 CongruenceClass *, CongruenceClass *);
800 void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
801 CongruenceClass *, CongruenceClass *);
802 Value *getNextValueLeader(CongruenceClass *) const;
803 const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
804 bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
805 CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
806 const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
807 bool isMemoryAccessTOP(const MemoryAccess *) const;
808
809 // Ranking
810 unsigned int getRank(const Value *) const;
811 bool shouldSwapOperands(const Value *, const Value *) const;
812 bool shouldSwapOperandsForIntrinsic(const Value *, const Value *,
813 const IntrinsicInst *I) const;
814
815 // Reachability handling.
816 void updateReachableEdge(BasicBlock *, BasicBlock *);
817 void processOutgoingEdges(Instruction *, BasicBlock *);
818 Value *findConditionEquivalence(Value *) const;
819
820 // Elimination.
821 struct ValueDFS;
822 void convertClassToDFSOrdered(const CongruenceClass &,
823 SmallVectorImpl<ValueDFS> &,
824 DenseMap<const Value *, unsigned int> &,
825 SmallPtrSetImpl<Instruction *> &) const;
826 void convertClassToLoadsAndStores(const CongruenceClass &,
827 SmallVectorImpl<ValueDFS> &) const;
828
829 bool eliminateInstructions(Function &);
830 void replaceInstruction(Instruction *, Value *);
831 void markInstructionForDeletion(Instruction *);
832 void deleteInstructionsInBlock(BasicBlock *);
833 Value *findPHIOfOpsLeader(const Expression *, const Instruction *,
834 const BasicBlock *) const;
835
836 // Various instruction touch utilities
837 template <typename Map, typename KeyType>
838 void touchAndErase(Map &, const KeyType &);
839 void markUsersTouched(Value *);
840 void markMemoryUsersTouched(const MemoryAccess *);
841 void markMemoryDefTouched(const MemoryAccess *);
842 void markPredicateUsersTouched(Instruction *);
843 void markValueLeaderChangeTouched(CongruenceClass *CC);
844 void markMemoryLeaderChangeTouched(CongruenceClass *CC);
845 void markPhiOfOpsChanged(const Expression *E);
846 void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
847 void addAdditionalUsers(Value *To, Value *User) const;
848 void addAdditionalUsers(ExprResult &Res, Instruction *User) const;
849
850 // Main loop of value numbering
851 void iterateTouchedInstructions();
852
853 // Utilities.
854 void cleanupTables();
855 std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
856 void updateProcessedCount(const Value *V);
857 void verifyMemoryCongruency() const;
858 void verifyIterationSettled(Function &F);
859 void verifyStoreExpressions() const;
860 bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
861 const MemoryAccess *, const MemoryAccess *) const;
862 BasicBlock *getBlockForValue(Value *V) const;
863 void deleteExpression(const Expression *E) const;
864 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
865 MemoryPhi *getMemoryAccess(const BasicBlock *) const;
866 template <class T, class Range> T *getMinDFSOfRange(const Range &) const;
867
868 unsigned InstrToDFSNum(const Value *V) const {
869 assert(isa<Instruction>(V) && "This should not be used for MemoryAccesses");
870 return InstrDFS.lookup(Val: V);
871 }
872
873 unsigned InstrToDFSNum(const MemoryAccess *MA) const {
874 return MemoryToDFSNum(MA);
875 }
876
877 Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }
878
879 // Given a MemoryAccess, return the relevant instruction DFS number. Note:
880 // This deliberately takes a value so it can be used with Use's, which will
881 // auto-convert to Value's but not to MemoryAccess's.
882 unsigned MemoryToDFSNum(const Value *MA) const {
883 assert(isa<MemoryAccess>(MA) &&
884 "This should not be used with instructions");
885 return isa<MemoryUseOrDef>(Val: MA)
886 ? InstrToDFSNum(V: cast<MemoryUseOrDef>(Val: MA)->getMemoryInst())
887 : InstrDFS.lookup(Val: MA);
888 }
889
890 bool isCycleFree(const Instruction *) const;
891 bool isBackedge(BasicBlock *From, BasicBlock *To) const;
892
893 // Debug counter info. When verifying, we have to reset the value numbering
894 // debug counter to the same state it started in to get the same results.
895 int64_t StartingVNCounter = 0;
896};
897
898} // end anonymous namespace
899
900template <typename T>
901static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
902 if (!isa<LoadExpression>(Val: RHS) && !isa<StoreExpression>(Val: RHS))
903 return false;
904 return LHS.MemoryExpression::equals(RHS);
905}
906
907bool LoadExpression::equals(const Expression &Other) const {
908 return equalsLoadStoreHelper(LHS: *this, RHS: Other);
909}
910
911bool StoreExpression::equals(const Expression &Other) const {
912 if (!equalsLoadStoreHelper(LHS: *this, RHS: Other))
913 return false;
914 // Make sure that store vs store includes the value operand.
915 if (const auto *S = dyn_cast<StoreExpression>(Val: &Other))
916 if (getStoredValue() != S->getStoredValue())
917 return false;
918 return true;
919}
920
921// Determine if the edge From->To is a backedge
922bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
923 return From == To ||
924 RPOOrdering.lookup(Val: DT->getNode(BB: From)) >=
925 RPOOrdering.lookup(Val: DT->getNode(BB: To));
926}
927
928#ifndef NDEBUG
929static std::string getBlockName(const BasicBlock *B) {
930 return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(Node: B, nullptr);
931}
932#endif
933
934// Get a MemoryAccess for an instruction, fake or real.
935MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
936 auto *Result = MSSA->getMemoryAccess(I);
937 return Result ? Result : TempToMemory.lookup(Val: I);
938}
939
940// Get a MemoryPhi for a basic block. These are all real.
941MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
942 return MSSA->getMemoryAccess(BB);
943}
944
945// Get the basic block from an instruction/memory value.
946BasicBlock *NewGVN::getBlockForValue(Value *V) const {
947 if (auto *I = dyn_cast<Instruction>(Val: V)) {
948 auto *Parent = I->getParent();
949 if (Parent)
950 return Parent;
951 Parent = TempToBlock.lookup(Val: V);
952 assert(Parent && "Every fake instruction should have a block");
953 return Parent;
954 }
955
956 auto *MP = dyn_cast<MemoryPhi>(Val: V);
957 assert(MP && "Should have been an instruction or a MemoryPhi");
958 return MP->getBlock();
959}
960
961// Delete a definitely dead expression, so it can be reused by the expression
962// allocator. Some of these are not in creation functions, so we have to accept
963// const versions.
964void NewGVN::deleteExpression(const Expression *E) const {
965 assert(isa<BasicExpression>(E));
966 auto *BE = cast<BasicExpression>(Val: E);
967 const_cast<BasicExpression *>(BE)->deallocateOperands(Recycler&: ArgRecycler);
968 ExpressionAllocator.Deallocate(Ptr: E);
969}
970
971// If V is a predicateinfo copy, get the thing it is a copy of.
972static Value *getCopyOf(const Value *V) {
973 if (auto *II = dyn_cast<IntrinsicInst>(Val: V))
974 if (II->getIntrinsicID() == Intrinsic::ssa_copy)
975 return II->getOperand(i_nocapture: 0);
976 return nullptr;
977}
978
979// Return true if V is really PN, even accounting for predicateinfo copies.
980static bool isCopyOfPHI(const Value *V, const PHINode *PN) {
981 return V == PN || getCopyOf(V) == PN;
982}
983
984static bool isCopyOfAPHI(const Value *V) {
985 auto *CO = getCopyOf(V);
986 return CO && isa<PHINode>(Val: CO);
987}
988
989// Sort PHI Operands into a canonical order. What we use here is an RPO
990// order. The BlockInstRange numbers are generated in an RPO walk of the basic
991// blocks.
992void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
993 llvm::sort(C&: Ops, Comp: [&](const ValPair &P1, const ValPair &P2) {
994 return BlockInstRange.lookup(Val: P1.second).first <
995 BlockInstRange.lookup(Val: P2.second).first;
996 });
997}
998
999// Return true if V is a value that will always be available (IE can
1000// be placed anywhere) in the function. We don't do globals here
1001// because they are often worse to put in place.
1002static bool alwaysAvailable(Value *V) {
1003 return isa<Constant>(Val: V) || isa<Argument>(Val: V);
1004}
1005
1006// Create a PHIExpression from an array of {incoming edge, value} pairs. I is
1007// the original instruction we are creating a PHIExpression for (but may not be
1008// a phi node). We require, as an invariant, that all the PHIOperands in the
1009// same block are sorted the same way. sortPHIOps will sort them into a
1010// canonical order.
1011PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands,
1012 const Instruction *I,
1013 BasicBlock *PHIBlock,
1014 bool &HasBackedge,
1015 bool &OriginalOpsConstant) const {
1016 unsigned NumOps = PHIOperands.size();
1017 auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock);
1018
1019 E->allocateOperands(Recycler&: ArgRecycler, Allocator&: ExpressionAllocator);
1020 E->setType(PHIOperands.begin()->first->getType());
1021 E->setOpcode(Instruction::PHI);
1022
1023 // Filter out unreachable phi operands.
1024 auto Filtered = make_filter_range(Range&: PHIOperands, Pred: [&](const ValPair &P) {
1025 auto *BB = P.second;
1026 if (auto *PHIOp = dyn_cast<PHINode>(Val: I))
1027 if (isCopyOfPHI(V: P.first, PN: PHIOp))
1028 return false;
1029 if (!ReachableEdges.count(V: {BB, PHIBlock}))
1030 return false;
1031 // Things in TOPClass are equivalent to everything.
1032 if (ValueToClass.lookup(Val: P.first) == TOPClass)
1033 return false;
1034 OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(Val: P.first);
1035 HasBackedge = HasBackedge || isBackedge(From: BB, To: PHIBlock);
1036 return lookupOperandLeader(P.first) != I;
1037 });
1038 std::transform(first: Filtered.begin(), last: Filtered.end(), result: op_inserter(E),
1039 unary_op: [&](const ValPair &P) -> Value * {
1040 return lookupOperandLeader(P.first);
1041 });
1042 return E;
1043}
1044
1045// Set basic expression info (Arguments, type, opcode) for Expression
1046// E from Instruction I in block B.
1047bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
1048 bool AllConstant = true;
1049 if (auto *GEP = dyn_cast<GetElementPtrInst>(Val: I))
1050 E->setType(GEP->getSourceElementType());
1051 else
1052 E->setType(I->getType());
1053 E->setOpcode(I->getOpcode());
1054 E->allocateOperands(Recycler&: ArgRecycler, Allocator&: ExpressionAllocator);
1055
1056 // Transform the operand array into an operand leader array, and keep track of
1057 // whether all members are constant.
1058 std::transform(first: I->op_begin(), last: I->op_end(), result: op_inserter(E), unary_op: [&](Value *O) {
1059 auto Operand = lookupOperandLeader(O);
1060 AllConstant = AllConstant && isa<Constant>(Val: Operand);
1061 return Operand;
1062 });
1063
1064 return AllConstant;
1065}
1066
1067const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
1068 Value *Arg1, Value *Arg2,
1069 Instruction *I) const {
1070 auto *E = new (ExpressionAllocator) BasicExpression(2);
1071 // TODO: we need to remove context instruction after Value Tracking
1072 // can run without context instruction
1073 const SimplifyQuery Q = SQ.getWithInstruction(I);
1074
1075 E->setType(T);
1076 E->setOpcode(Opcode);
1077 E->allocateOperands(Recycler&: ArgRecycler, Allocator&: ExpressionAllocator);
1078 if (Instruction::isCommutative(Opcode)) {
1079 // Ensure that commutative instructions that only differ by a permutation
1080 // of their operands get the same value number by sorting the operand value
1081 // numbers. Since all commutative instructions have two operands it is more
1082 // efficient to sort by hand rather than using, say, std::sort.
1083 if (shouldSwapOperands(Arg1, Arg2))
1084 std::swap(a&: Arg1, b&: Arg2);
1085 }
1086 E->op_push_back(Arg: lookupOperandLeader(Arg1));
1087 E->op_push_back(Arg: lookupOperandLeader(Arg2));
1088
1089 Value *V = simplifyBinOp(Opcode, LHS: E->getOperand(N: 0), RHS: E->getOperand(N: 1), Q);
1090 if (auto Simplified = checkExprResults(E, I, V)) {
1091 addAdditionalUsers(Res&: Simplified, User: I);
1092 return Simplified.Expr;
1093 }
1094 return E;
1095}
1096
1097// Take a Value returned by simplification of Expression E/Instruction
1098// I, and see if it resulted in a simpler expression. If so, return
1099// that expression.
1100NewGVN::ExprResult NewGVN::checkExprResults(Expression *E, Instruction *I,
1101 Value *V) const {
1102 if (!V)
1103 return ExprResult::none();
1104
1105 if (auto *C = dyn_cast<Constant>(Val: V)) {
1106 if (I)
1107 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1108 << " constant " << *C << "\n");
1109 NumGVNOpsSimplified++;
1110 assert(isa<BasicExpression>(E) &&
1111 "We should always have had a basic expression here");
1112 deleteExpression(E);
1113 return ExprResult::some(Expr: createConstantExpression(C));
1114 } else if (isa<Argument>(Val: V) || isa<GlobalVariable>(Val: V)) {
1115 if (I)
1116 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1117 << " variable " << *V << "\n");
1118 deleteExpression(E);
1119 return ExprResult::some(Expr: createVariableExpression(V));
1120 }
1121
1122 CongruenceClass *CC = ValueToClass.lookup(Val: V);
1123 if (CC) {
1124 if (CC->getLeader() && CC->getLeader() != I) {
1125 return ExprResult::some(Expr: createVariableOrConstant(V: CC->getLeader()), ExtraDep: V);
1126 }
1127 if (CC->getDefiningExpr()) {
1128 if (I)
1129 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1130 << " expression " << *CC->getDefiningExpr() << "\n");
1131 NumGVNOpsSimplified++;
1132 deleteExpression(E);
1133 return ExprResult::some(Expr: CC->getDefiningExpr(), ExtraDep: V);
1134 }
1135 }
1136
1137 return ExprResult::none();
1138}
1139
1140// Create a value expression from the instruction I, replacing operands with
1141// their leaders.
1142
1143NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
1144 auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());
1145 // TODO: we need to remove context instruction after Value Tracking
1146 // can run without context instruction
1147 const SimplifyQuery Q = SQ.getWithInstruction(I);
1148
1149 bool AllConstant = setBasicExpressionInfo(I, E);
1150
1151 if (I->isCommutative()) {
1152 // Ensure that commutative instructions that only differ by a permutation
1153 // of their operands get the same value number by sorting the operand value
1154 // numbers. Since all commutative instructions have two operands it is more
1155 // efficient to sort by hand rather than using, say, std::sort.
1156 assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
1157 if (shouldSwapOperands(E->getOperand(N: 0), E->getOperand(N: 1)))
1158 E->swapOperands(First: 0, Second: 1);
1159 }
1160 // Perform simplification.
1161 if (auto *CI = dyn_cast<CmpInst>(Val: I)) {
1162 // Sort the operand value numbers so x<y and y>x get the same value
1163 // number.
1164 CmpInst::Predicate Predicate = CI->getPredicate();
1165 if (shouldSwapOperands(E->getOperand(N: 0), E->getOperand(N: 1))) {
1166 E->swapOperands(First: 0, Second: 1);
1167 Predicate = CmpInst::getSwappedPredicate(pred: Predicate);
1168 }
1169 E->setOpcode((CI->getOpcode() << 8) | Predicate);
1170 // TODO: 25% of our time is spent in simplifyCmpInst with pointer operands
1171 assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
1172 "Wrong types on cmp instruction");
1173 assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
1174 E->getOperand(1)->getType() == I->getOperand(1)->getType()));
1175 Value *V =
1176 simplifyCmpInst(Predicate, LHS: E->getOperand(N: 0), RHS: E->getOperand(N: 1), Q);
1177 if (auto Simplified = checkExprResults(E, I, V))
1178 return Simplified;
1179 } else if (isa<SelectInst>(Val: I)) {
1180 if (isa<Constant>(Val: E->getOperand(N: 0)) ||
1181 E->getOperand(N: 1) == E->getOperand(N: 2)) {
1182 assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
1183 E->getOperand(2)->getType() == I->getOperand(2)->getType());
1184 Value *V = simplifySelectInst(Cond: E->getOperand(N: 0), TrueVal: E->getOperand(N: 1),
1185 FalseVal: E->getOperand(N: 2), Q);
1186 if (auto Simplified = checkExprResults(E, I, V))
1187 return Simplified;
1188 }
1189 } else if (I->isBinaryOp()) {
1190 Value *V =
1191 simplifyBinOp(Opcode: E->getOpcode(), LHS: E->getOperand(N: 0), RHS: E->getOperand(N: 1), Q);
1192 if (auto Simplified = checkExprResults(E, I, V))
1193 return Simplified;
1194 } else if (auto *CI = dyn_cast<CastInst>(Val: I)) {
1195 Value *V =
1196 simplifyCastInst(CastOpc: CI->getOpcode(), Op: E->getOperand(N: 0), Ty: CI->getType(), Q);
1197 if (auto Simplified = checkExprResults(E, I, V))
1198 return Simplified;
1199 } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(Val: I)) {
1200 Value *V = simplifyGEPInst(SrcTy: GEPI->getSourceElementType(), Ptr: *E->op_begin(),
1201 Indices: ArrayRef(std::next(x: E->op_begin()), E->op_end()),
1202 InBounds: GEPI->isInBounds(), Q);
1203 if (auto Simplified = checkExprResults(E, I, V))
1204 return Simplified;
1205 } else if (AllConstant) {
1206 // We don't bother trying to simplify unless all of the operands
1207 // were constant.
1208 // TODO: There are a lot of Simplify*'s we could call here, if we
1209 // wanted to. The original motivating case for this code was a
1210 // zext i1 false to i8, which we don't have an interface to
1211 // simplify (IE there is no SimplifyZExt).
1212
1213 SmallVector<Constant *, 8> C;
1214 for (Value *Arg : E->operands())
1215 C.emplace_back(Args: cast<Constant>(Val: Arg));
1216
1217 if (Value *V = ConstantFoldInstOperands(I, Ops: C, DL, TLI))
1218 if (auto Simplified = checkExprResults(E, I, V))
1219 return Simplified;
1220 }
1221 return ExprResult::some(Expr: E);
1222}
1223
1224const AggregateValueExpression *
1225NewGVN::createAggregateValueExpression(Instruction *I) const {
1226 if (auto *II = dyn_cast<InsertValueInst>(Val: I)) {
1227 auto *E = new (ExpressionAllocator)
1228 AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
1229 setBasicExpressionInfo(I, E);
1230 E->allocateIntOperands(Allocator&: ExpressionAllocator);
1231 std::copy(first: II->idx_begin(), last: II->idx_end(), result: int_op_inserter(E));
1232 return E;
1233 } else if (auto *EI = dyn_cast<ExtractValueInst>(Val: I)) {
1234 auto *E = new (ExpressionAllocator)
1235 AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
1236 setBasicExpressionInfo(I: EI, E);
1237 E->allocateIntOperands(Allocator&: ExpressionAllocator);
1238 std::copy(first: EI->idx_begin(), last: EI->idx_end(), result: int_op_inserter(E));
1239 return E;
1240 }
1241 llvm_unreachable("Unhandled type of aggregate value operation");
1242}
1243
1244const DeadExpression *NewGVN::createDeadExpression() const {
1245 // DeadExpression has no arguments and all DeadExpression's are the same,
1246 // so we only need one of them.
1247 return SingletonDeadExpression;
1248}
1249
1250const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
1251 auto *E = new (ExpressionAllocator) VariableExpression(V);
1252 E->setOpcode(V->getValueID());
1253 return E;
1254}
1255
1256const Expression *NewGVN::createVariableOrConstant(Value *V) const {
1257 if (auto *C = dyn_cast<Constant>(Val: V))
1258 return createConstantExpression(C);
1259 return createVariableExpression(V);
1260}
1261
1262const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
1263 auto *E = new (ExpressionAllocator) ConstantExpression(C);
1264 E->setOpcode(C->getValueID());
1265 return E;
1266}
1267
1268const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
1269 auto *E = new (ExpressionAllocator) UnknownExpression(I);
1270 E->setOpcode(I->getOpcode());
1271 return E;
1272}
1273
1274const CallExpression *
1275NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
1276 // FIXME: Add operand bundles for calls.
1277 auto *E =
1278 new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
1279 setBasicExpressionInfo(I: CI, E);
1280 if (CI->isCommutative()) {
1281 // Ensure that commutative intrinsics that only differ by a permutation
1282 // of their operands get the same value number by sorting the operand value
1283 // numbers.
1284 assert(CI->getNumOperands() >= 2 && "Unsupported commutative intrinsic!");
1285 if (shouldSwapOperands(E->getOperand(N: 0), E->getOperand(N: 1)))
1286 E->swapOperands(First: 0, Second: 1);
1287 }
1288 return E;
1289}
1290
1291// Return true if some equivalent of instruction Inst dominates instruction U.
1292bool NewGVN::someEquivalentDominates(const Instruction *Inst,
1293 const Instruction *U) const {
1294 auto *CC = ValueToClass.lookup(Val: Inst);
1295 // This must be an instruction because we are only called from phi nodes
1296 // in the case that the value it needs to check against is an instruction.
1297
1298 // The most likely candidates for dominance are the leader and the next leader.
1299 // The leader or nextleader will dominate in all cases where there is an
1300 // equivalent that is higher up in the dom tree.
1301 // We can't *only* check them, however, because the
1302 // dominator tree could have an infinite number of non-dominating siblings
1303 // with instructions that are in the right congruence class.
1304 // A
1305 // B C D E F G
1306 // |
1307 // H
1308 // Instruction U could be in H, with equivalents in every other sibling.
1309 // Depending on the rpo order picked, the leader could be the equivalent in
1310 // any of these siblings.
1311 if (!CC)
1312 return false;
1313 if (alwaysAvailable(V: CC->getLeader()))
1314 return true;
1315 if (DT->dominates(Def: cast<Instruction>(Val: CC->getLeader()), User: U))
1316 return true;
1317 if (CC->getNextLeader().first &&
1318 DT->dominates(Def: cast<Instruction>(Val: CC->getNextLeader().first), User: U))
1319 return true;
1320 return llvm::any_of(Range&: *CC, P: [&](const Value *Member) {
1321 return Member != CC->getLeader() &&
1322 DT->dominates(Def: cast<Instruction>(Val: Member), User: U);
1323 });
1324}
1325
1326// See if we have a congruence class and leader for this operand, and if so,
1327// return it. Otherwise, return the operand itself.
1328Value *NewGVN::lookupOperandLeader(Value *V) const {
1329 CongruenceClass *CC = ValueToClass.lookup(Val: V);
1330 if (CC) {
1331 // Everything in TOP is represented by poison, as it can be any value.
1332 // We do have to make sure we get the type right though, so we can't set the
1333 // RepLeader to poison.
1334 if (CC == TOPClass)
1335 return PoisonValue::get(T: V->getType());
1336 return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
1337 }
1338
1339 return V;
1340}
1341
1342const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
1343 auto *CC = getMemoryClass(MA);
1344 assert(CC->getMemoryLeader() &&
1345 "Every MemoryAccess should be mapped to a congruence class with a "
1346 "representative memory access");
1347 return CC->getMemoryLeader();
1348}
1349
1350// Return true if the MemoryAccess is really equivalent to everything. This is
1351// equivalent to the lattice value "TOP" in most lattices. This is the initial
1352// state of all MemoryAccesses.
1353bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
1354 return getMemoryClass(MA) == TOPClass;
1355}
1356
1357LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
1358 LoadInst *LI,
1359 const MemoryAccess *MA) const {
1360 auto *E =
1361 new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
1362 E->allocateOperands(Recycler&: ArgRecycler, Allocator&: ExpressionAllocator);
1363 E->setType(LoadType);
1364
1365 // Give stores and loads the same opcode so they value number together.
1366 E->setOpcode(0);
1367 E->op_push_back(Arg: PointerOp);
1368
1369 // TODO: Value number heap versions. We may be able to discover
1370 // things alias analysis can't on its own (IE that a store and a
1371 // load have the same value, and thus, it isn't clobbering the load).
1372 return E;
1373}
1374
1375const StoreExpression *
1376NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
1377 auto *StoredValueLeader = lookupOperandLeader(V: SI->getValueOperand());
1378 auto *E = new (ExpressionAllocator)
1379 StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
1380 E->allocateOperands(Recycler&: ArgRecycler, Allocator&: ExpressionAllocator);
1381 E->setType(SI->getValueOperand()->getType());
1382
1383 // Give stores and loads the same opcode so they value number together.
1384 E->setOpcode(0);
1385 E->op_push_back(Arg: lookupOperandLeader(V: SI->getPointerOperand()));
1386
1387 // TODO: Value number heap versions. We may be able to discover
1388 // things alias analysis can't on its own (IE that a store and a
1389 // load have the same value, and thus, it isn't clobbering the load).
1390 return E;
1391}
1392
1393const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const {
1394 // Unlike loads, we never try to eliminate stores, so we do not check if they
1395 // are simple and avoid value numbering them.
1396 auto *SI = cast<StoreInst>(Val: I);
1397 auto *StoreAccess = getMemoryAccess(I: SI);
1398 // Get the expression, if any, for the RHS of the MemoryDef.
1399 const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess();
1400 if (EnableStoreRefinement)
1401 StoreRHS = MSSAWalker->getClobberingMemoryAccess(MA: StoreAccess);
1402 // If we bypassed the use-def chains, make sure we add a use.
1403 StoreRHS = lookupMemoryLeader(MA: StoreRHS);
1404 if (StoreRHS != StoreAccess->getDefiningAccess())
1405 addMemoryUsers(To: StoreRHS, U: StoreAccess);
1406 // If we are defined by ourselves, use the live on entry def.
1407 if (StoreRHS == StoreAccess)
1408 StoreRHS = MSSA->getLiveOnEntryDef();
1409
1410 if (SI->isSimple()) {
1411 // See if we are defined by a previous store expression that already has a
1412 // value, and it's the same value as our current store. FIXME: Right now, we
1413 // only do this for simple stores; we should expand to cover memcpys, etc.
1414 const auto *LastStore = createStoreExpression(SI, MA: StoreRHS);
1415 const auto *LastCC = ExpressionToClass.lookup(Val: LastStore);
1416 // We really want to check whether the expression we matched was a store. No
1417 // easy way to do that. However, we can check that the class we found has a
1418 // store, which, assuming the value numbering state is not corrupt, is
1419 // sufficient, because we must also be equivalent to that store's expression
1420 // for it to be in the same class as our store.
1421 if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue())
1422 return LastStore;
1423 // Also check if our value operand is defined by a load of the same memory
1424 // location, and the memory state is the same as it was then (otherwise, it
1425 // could have been overwritten later. See test32 in
1426 // transforms/DeadStoreElimination/simple.ll).
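  // Illustrative example (assumed IR, not from a specific test): if %v was
  // produced by "%v = load i32, ptr %p" and the memory state at this store
  // is still the one that load saw, then "store i32 %v, ptr %p" writes back
  // the value already in memory and does not create a new memory state.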
1427 if (auto *LI = dyn_cast<LoadInst>(Val: LastStore->getStoredValue()))
1428 if ((lookupOperandLeader(V: LI->getPointerOperand()) ==
1429 LastStore->getOperand(N: 0)) &&
1430 (lookupMemoryLeader(MA: getMemoryAccess(I: LI)->getDefiningAccess()) ==
1431 StoreRHS))
1432 return LastStore;
1433 deleteExpression(E: LastStore);
1434 }
1435
1436 // If the store is not equivalent to anything, value number it as a store that
1437 // produces a unique memory state (instead of using its MemoryUse, we use
1438 // its MemoryDef).
1439 return createStoreExpression(SI, MA: StoreAccess);
1440}
1441
1442// See if we can extract the value being loaded from an earlier load, a store,
1443// or a memory intrinsic.
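// Illustrative example (assumed IR): given "store i32 305419896, ptr %p" and
// a later "%b = load i8, ptr %p" clobbered only by that store, the load can
// be valued as the constant byte extracted from the stored constant at the
// computed offset.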
1444const Expression *
1445NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
1446 LoadInst *LI, Instruction *DepInst,
1447 MemoryAccess *DefiningAccess) const {
1448 assert((!LI || LI->isSimple()) && "Not a simple load");
1449 if (auto *DepSI = dyn_cast<StoreInst>(Val: DepInst)) {
1450 // Can't forward from non-atomic to atomic without violating memory model.
1451 // Also don't need to coerce if they are the same type; we will just
1452 // propagate.
1453 if (LI->isAtomic() > DepSI->isAtomic() ||
1454 LoadType == DepSI->getValueOperand()->getType())
1455 return nullptr;
1456 int Offset = analyzeLoadFromClobberingStore(LoadTy: LoadType, LoadPtr, DepSI, DL);
1457 if (Offset >= 0) {
1458 if (auto *C = dyn_cast<Constant>(
1459 Val: lookupOperandLeader(V: DepSI->getValueOperand()))) {
1460 if (Constant *Res = getConstantValueForLoad(SrcVal: C, Offset, LoadTy: LoadType, DL)) {
1461 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI
1462 << " to constant " << *Res << "\n");
1463 return createConstantExpression(C: Res);
1464 }
1465 }
1466 }
1467 } else if (auto *DepLI = dyn_cast<LoadInst>(Val: DepInst)) {
1468 // Can't forward from non-atomic to atomic without violating memory model.
1469 if (LI->isAtomic() > DepLI->isAtomic())
1470 return nullptr;
1471 int Offset = analyzeLoadFromClobberingLoad(LoadTy: LoadType, LoadPtr, DepLI, DL);
1472 if (Offset >= 0) {
1473 // We can coerce a constant load into a load.
1474 if (auto *C = dyn_cast<Constant>(Val: lookupOperandLeader(V: DepLI)))
1475 if (auto *PossibleConstant =
1476 getConstantValueForLoad(SrcVal: C, Offset, LoadTy: LoadType, DL)) {
1477 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI
1478 << " to constant " << *PossibleConstant << "\n");
1479 return createConstantExpression(C: PossibleConstant);
1480 }
1481 }
1482 } else if (auto *DepMI = dyn_cast<MemIntrinsic>(Val: DepInst)) {
1483 int Offset = analyzeLoadFromClobberingMemInst(LoadTy: LoadType, LoadPtr, DepMI, DL);
1484 if (Offset >= 0) {
1485 if (auto *PossibleConstant =
1486 getConstantMemInstValueForLoad(SrcInst: DepMI, Offset, LoadTy: LoadType, DL)) {
1487 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
1488 << " to constant " << *PossibleConstant << "\n");
1489 return createConstantExpression(C: PossibleConstant);
1490 }
1491 }
1492 }
1493
1494 // All of the below are only true if the loaded pointer is produced
1495 // by the dependent instruction.
1496 if (LoadPtr != lookupOperandLeader(V: DepInst) &&
1497 !AA->isMustAlias(V1: LoadPtr, V2: DepInst))
1498 return nullptr;
1499 // If this load really doesn't depend on anything, then we must be loading an
1500 // undef value. This can happen when loading from a fresh allocation with no
1501 // intervening stores, for example. Note that this is only true in the case
1502 // that the result of the allocation is pointer equal to the load ptr.
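  // Illustrative example (assumed IR): "%a = alloca i32" followed directly
  // by "%v = load i32, ptr %a" has no defining store, so %v is valued as
  // undef here.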
1503 if (isa<AllocaInst>(Val: DepInst)) {
1504 return createConstantExpression(C: UndefValue::get(T: LoadType));
1505 }
1506 // If this load occurs right after a lifetime begin,
1507 // then the loaded value is undefined.
1508 else if (auto *II = dyn_cast<IntrinsicInst>(Val: DepInst)) {
1509 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1510 return createConstantExpression(C: UndefValue::get(T: LoadType));
1511 } else if (auto *InitVal =
1512 getInitialValueOfAllocation(V: DepInst, TLI, Ty: LoadType))
1513 return createConstantExpression(C: InitVal);
1514
1515 return nullptr;
1516}
1517
1518const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
1519 auto *LI = cast<LoadInst>(Val: I);
1520
1521 // We can eliminate in favor of non-simple loads, but we won't be able to
1522 // eliminate the loads themselves.
1523 if (!LI->isSimple())
1524 return nullptr;
1525
1526 Value *LoadAddressLeader = lookupOperandLeader(V: LI->getPointerOperand());
1527 // Loading from an undef pointer is UB.
1528 if (isa<UndefValue>(Val: LoadAddressLeader))
1529 return createConstantExpression(C: PoisonValue::get(T: LI->getType()));
1530 MemoryAccess *OriginalAccess = getMemoryAccess(I);
1531 MemoryAccess *DefiningAccess =
1532 MSSAWalker->getClobberingMemoryAccess(MA: OriginalAccess);
1533
1534 if (!MSSA->isLiveOnEntryDef(MA: DefiningAccess)) {
1535 if (auto *MD = dyn_cast<MemoryDef>(Val: DefiningAccess)) {
1536 Instruction *DefiningInst = MD->getMemoryInst();
1537 // If the defining instruction is not reachable, replace with poison.
1538 if (!ReachableBlocks.count(Ptr: DefiningInst->getParent()))
1539 return createConstantExpression(C: PoisonValue::get(T: LI->getType()));
1540 // This will handle stores and memory insts. We only do this if the
1541 // defining access has a different type, or it is a pointer produced by
1542 // certain memory operations that cause the memory to have a fixed value
1543 // (IE things like calloc).
1544 if (const auto *CoercionResult =
1545 performSymbolicLoadCoercion(LoadType: LI->getType(), LoadPtr: LoadAddressLeader, LI,
1546 DepInst: DefiningInst, DefiningAccess))
1547 return CoercionResult;
1548 }
1549 }
1550
1551 const auto *LE = createLoadExpression(LoadType: LI->getType(), PointerOp: LoadAddressLeader, LI,
1552 MA: DefiningAccess);
1553 // If our MemoryLeader is not our defining access, add a use to the
1554 // MemoryLeader, so that we get reprocessed when it changes.
1555 if (LE->getMemoryLeader() != DefiningAccess)
1556 addMemoryUsers(To: LE->getMemoryLeader(), U: OriginalAccess);
1557 return LE;
1558}
1559
1560NewGVN::ExprResult
1561NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst *I) const {
1562 auto *PI = PredInfo->getPredicateInfoFor(V: I);
1563 if (!PI)
1564 return ExprResult::none();
1565
1566 LLVM_DEBUG(dbgs() << "Found predicate info from instruction !\n");
1567
1568 const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
1569 if (!Constraint)
1570 return ExprResult::none();
1571
1572 CmpInst::Predicate Predicate = Constraint->Predicate;
1573 Value *CmpOp0 = I->getOperand(i_nocapture: 0);
1574 Value *CmpOp1 = Constraint->OtherOp;
1575
1576 Value *FirstOp = lookupOperandLeader(V: CmpOp0);
1577 Value *SecondOp = lookupOperandLeader(V: CmpOp1);
1578 Value *AdditionallyUsedValue = CmpOp0;
1579
1580 // Sort the ops.
1581 if (shouldSwapOperandsForIntrinsic(FirstOp, SecondOp, I)) {
1582 std::swap(a&: FirstOp, b&: SecondOp);
1583 Predicate = CmpInst::getSwappedPredicate(pred: Predicate);
1584 AdditionallyUsedValue = CmpOp1;
1585 }
1586
1587 if (Predicate == CmpInst::ICMP_EQ)
1588 return ExprResult::some(Expr: createVariableOrConstant(V: FirstOp),
1589 ExtraDep: AdditionallyUsedValue, PredDep: PI);
1590
1591 // Handle the special case of floating point.
1592 if (Predicate == CmpInst::FCMP_OEQ && isa<ConstantFP>(Val: FirstOp) &&
1593 !cast<ConstantFP>(Val: FirstOp)->isZero())
1594 return ExprResult::some(Expr: createConstantExpression(C: cast<Constant>(Val: FirstOp)),
1595 ExtraDep: AdditionallyUsedValue, PredDep: PI);
1596
1597 return ExprResult::none();
1598}
1599
1600// Evaluate read only and pure calls, and create an expression result.
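// Illustrative example (not from the source): two calls to a readnone
// function with congruent arguments get the same CallExpression; for a
// readonly function the calls must additionally see the same reaching
// memory state.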
1601NewGVN::ExprResult NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1602 auto *CI = cast<CallInst>(Val: I);
1603 if (auto *II = dyn_cast<IntrinsicInst>(Val: I)) {
1604 // Intrinsics with the returned attribute are copies of arguments.
1605 if (auto *ReturnedValue = II->getReturnedArgOperand()) {
1606 if (II->getIntrinsicID() == Intrinsic::ssa_copy)
1607 if (auto Res = performSymbolicPredicateInfoEvaluation(I: II))
1608 return Res;
1609 return ExprResult::some(Expr: createVariableOrConstant(V: ReturnedValue));
1610 }
1611 }
1612
1613 // FIXME: Currently, calls that may access the thread id may be
1614 // considered as not accessing memory. But this is problematic
1615 // for coroutines, since coroutines may resume in a different
1616 // thread. So we disable the optimization here for correctness.
1617 // However, it may block many other correct optimizations.
1618 // Revisit this once we can detect the memory accessing kind
1619 // more precisely.
1620 if (CI->getFunction()->isPresplitCoroutine())
1621 return ExprResult::none();
1622
1623 // Do not combine convergent calls since they implicitly depend on the set of
1624 // threads that is currently executing, and they might be in different basic
1625 // blocks.
1626 if (CI->isConvergent())
1627 return ExprResult::none();
1628
1629 if (AA->doesNotAccessMemory(Call: CI)) {
1630 return ExprResult::some(
1631 Expr: createCallExpression(CI, MA: TOPClass->getMemoryLeader()));
1632 } else if (AA->onlyReadsMemory(Call: CI)) {
1633 if (auto *MA = MSSA->getMemoryAccess(I: CI)) {
1634 auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA);
1635 return ExprResult::some(Expr: createCallExpression(CI, MA: DefiningAccess));
1636 } else // MSSA determined that CI does not access memory.
1637 return ExprResult::some(
1638 Expr: createCallExpression(CI, MA: TOPClass->getMemoryLeader()));
1639 }
1640 return ExprResult::none();
1641}
1642
1643// Retrieve the memory class for a given MemoryAccess.
1644CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const {
1645 auto *Result = MemoryAccessToClass.lookup(Val: MA);
1646 assert(Result && "Should have found memory class");
1647 return Result;
1648}
1649
1650// Update the MemoryAccess equivalence table to say that From now belongs to
1651// NewClass, and return true if this differs from what was already in the table.
1652bool NewGVN::setMemoryClass(const MemoryAccess *From,
1653 CongruenceClass *NewClass) {
1654 assert(NewClass &&
1655 "Every MemoryAccess should be getting mapped to a non-null class");
1656 LLVM_DEBUG(dbgs() << "Setting " << *From);
1657 LLVM_DEBUG(dbgs() << " equivalent to congruence class ");
1658 LLVM_DEBUG(dbgs() << NewClass->getID()
1659 << " with current MemoryAccess leader ");
1660 LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");
1661
1662 auto LookupResult = MemoryAccessToClass.find(Val: From);
1663 bool Changed = false;
1664 // If it's already in the table, see if the value changed.
1665 if (LookupResult != MemoryAccessToClass.end()) {
1666 auto *OldClass = LookupResult->second;
1667 if (OldClass != NewClass) {
1668 // If this is a phi, we have to handle memory member updates.
1669 if (auto *MP = dyn_cast<MemoryPhi>(Val: From)) {
1670 OldClass->memory_erase(M: MP);
1671 NewClass->memory_insert(M: MP);
1672 // This may have killed the class if it had no non-memory members
1673 if (OldClass->getMemoryLeader() == From) {
1674 if (OldClass->definesNoMemory()) {
1675 OldClass->setMemoryLeader(nullptr);
1676 } else {
1677 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
1678 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
1679 << OldClass->getID() << " to "
1680 << *OldClass->getMemoryLeader()
1681 << " due to removal of a memory member " << *From
1682 << "\n");
1683 markMemoryLeaderChangeTouched(CC: OldClass);
1684 }
1685 }
1686 }
1687 // It wasn't equivalent before, and now it is.
1688 LookupResult->second = NewClass;
1689 Changed = true;
1690 }
1691 }
1692
1693 return Changed;
1694}
1695
1696// Determine if an instruction is cycle-free. That means the values in the
1697// instruction don't depend on any expressions that can change value as a result
1698// of the instruction. For example, a non-cycle free instruction would be v =
1699// phi(0, v+1).
1700bool NewGVN::isCycleFree(const Instruction *I) const {
1701 // In order to compute cycle-freeness, we do SCC finding on the instruction,
1702 // and see what kind of SCC it ends up in. If it is a singleton, it is
1703 // cycle-free. If it is not in a singleton, it is only cycle free if the
1704 // other members are all phi nodes (as they do not compute anything, they are
1705 // copies).
1706 auto ICS = InstCycleState.lookup(Val: I);
1707 if (ICS == ICS_Unknown) {
1708 SCCFinder.Start(Start: I);
1709 auto &SCC = SCCFinder.getComponentFor(V: I);
1710 // It's cycle free if its size is 1 or the SCC is *only* phi nodes.
1711 if (SCC.size() == 1)
1712 InstCycleState.insert(KV: {I, ICS_CycleFree});
1713 else {
1714 bool AllPhis = llvm::all_of(Range: SCC, P: [](const Value *V) {
1715 return isa<PHINode>(Val: V) || isCopyOfAPHI(V);
1716 });
1717 ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
1718 for (const auto *Member : SCC)
1719 if (auto *MemberPhi = dyn_cast<PHINode>(Val: Member))
1720 InstCycleState.insert(KV: {MemberPhi, ICS});
1721 }
1722 }
1723 if (ICS == ICS_Cycle)
1724 return false;
1725 return true;
1726}
1727
1728// Evaluate PHI nodes symbolically and create an expression result.
1729const Expression *
1730NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
1731 Instruction *I,
1732 BasicBlock *PHIBlock) const {
1733 // True if one of the incoming phi edges is a backedge.
1734 bool HasBackedge = false;
1735 // OriginalOpsConstant tracks whether all of the *original* phi operands were
1736 // constants. It is shorthand for "this phi cannot cycle due to forward
1737 // propagation": a change in the value of the phi cannot later change the
1738 // value of the phi again. IE it can't be v = phi(undef, v+1).
1739 bool OriginalOpsConstant = true;
1740 auto *E = cast<PHIExpression>(Val: createPHIExpression(
1741 PHIOperands: PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
1742 // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1743 // See if all arguments are the same.
1744 // We track if any were undef because they need special handling.
1745 bool HasUndef = false, HasPoison = false;
1746 auto Filtered = make_filter_range(Range: E->operands(), Pred: [&](Value *Arg) {
1747 if (isa<PoisonValue>(Val: Arg)) {
1748 HasPoison = true;
1749 return false;
1750 }
1751 if (isa<UndefValue>(Val: Arg)) {
1752 HasUndef = true;
1753 return false;
1754 }
1755 return true;
1756 });
1757 // If we are left with no operands, it's dead.
1758 if (Filtered.empty()) {
1759 // If it has undef or poison at this point, it means there are no non-undef
1760 // arguments, and thus, the value of the phi node must be undef.
1761 if (HasUndef) {
1762 LLVM_DEBUG(
1763 dbgs() << "PHI Node " << *I
1764 << " has no non-undef arguments, valuing it as undef\n");
1765 return createConstantExpression(C: UndefValue::get(T: I->getType()));
1766 }
1767 if (HasPoison) {
1768 LLVM_DEBUG(
1769 dbgs() << "PHI Node " << *I
1770 << " has no non-poison arguments, valuing it as poison\n");
1771 return createConstantExpression(C: PoisonValue::get(T: I->getType()));
1772 }
1773
1774 LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1775 deleteExpression(E);
1776 return createDeadExpression();
1777 }
1778 Value *AllSameValue = *(Filtered.begin());
1779 ++Filtered.begin();
1780 // Can't use std::equal here, sadly, because filter.begin moves.
1781 if (llvm::all_of(Range&: Filtered, P: [&](Value *Arg) { return Arg == AllSameValue; })) {
1782 // Can't fold phi(undef, X) -> X unless X can't be poison (thus X is undef
1783 // in the worst case).
1784 if (HasUndef && !isGuaranteedNotToBePoison(V: AllSameValue, AC, CtxI: nullptr, DT))
1785 return E;
1786
1787 // In LLVM's non-standard representation of phi nodes, it's possible to have
1788 // phi nodes with cycles (IE dependent on other phis that are in turn dependent
1789 // on the original phi node), especially in weird CFGs where some arguments
1790 // are unreachable, or uninitialized along certain paths. This can cause
1791 // infinite loops during evaluation. We work around this by not trying to
1792 // really evaluate them independently, but instead using a variable
1793 // expression to say if one is equivalent to the other.
1794 // We also special case undef/poison, so that if we have an undef, we can't
1795 // use the common value unless it dominates the phi block.
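  // Illustrative example (assumed IR): for %x = phi(undef, %y), we may only
  // value %x as %y if some equivalent of %y dominates the phi block and,
  // when a backedge is involved, the phi is known to be cycle free.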
1796 if (HasPoison || HasUndef) {
1797 // If we have undef and at least one other value, this is really a
1798 // multivalued phi, and we need to know if it's cycle free in order to
1799 // evaluate whether we can ignore the undef. The other parts of this are
1800 // just shortcuts. If there is no backedge, or all operands are
1801 // constants, it also must be cycle free.
1802 if (HasBackedge && !OriginalOpsConstant &&
1803 !isa<UndefValue>(Val: AllSameValue) && !isCycleFree(I))
1804 return E;
1805
1806 // Only have to check for instructions
1807 if (auto *AllSameInst = dyn_cast<Instruction>(Val: AllSameValue))
1808 if (!someEquivalentDominates(Inst: AllSameInst, U: I))
1809 return E;
1810 }
1811 // Can't simplify to something that comes later in the iteration.
1812 // Otherwise, when and if it changes congruence class, we will never catch
1813 // up. We will always be a class behind it.
1814 if (isa<Instruction>(Val: AllSameValue) &&
1815 InstrToDFSNum(V: AllSameValue) > InstrToDFSNum(V: I))
1816 return E;
1817 NumGVNPhisAllSame++;
1818 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
1819 << "\n");
1820 deleteExpression(E);
1821 return createVariableOrConstant(V: AllSameValue);
1822 }
1823 return E;
1824}
1825
1826const Expression *
1827NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
1828 if (auto *EI = dyn_cast<ExtractValueInst>(Val: I)) {
1829 auto *WO = dyn_cast<WithOverflowInst>(Val: EI->getAggregateOperand());
1830 if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
1831 // EI is an extract from one of our with.overflow intrinsics. Synthesize
1832 // a semantically equivalent expression instead of an extract value
1833 // expression.
1834 return createBinaryExpression(Opcode: WO->getBinaryOp(), T: EI->getType(),
1835 Arg1: WO->getLHS(), Arg2: WO->getRHS(), I);
1836 }
1837
1838 return createAggregateValueExpression(I);
1839}
1840
1841NewGVN::ExprResult NewGVN::performSymbolicCmpEvaluation(Instruction *I) const {
1842 assert(isa<CmpInst>(I) && "Expected a cmp instruction.");
1843
1844 auto *CI = cast<CmpInst>(Val: I);
1845 // See if our operands are equal to those of a previous predicate, and if so,
1846 // if it implies true or false.
1847 auto Op0 = lookupOperandLeader(V: CI->getOperand(i_nocapture: 0));
1848 auto Op1 = lookupOperandLeader(V: CI->getOperand(i_nocapture: 1));
1849 auto OurPredicate = CI->getPredicate();
1850 if (shouldSwapOperands(Op0, Op1)) {
1851 std::swap(a&: Op0, b&: Op1);
1852 OurPredicate = CI->getSwappedPredicate();
1853 }
1854
1855 // Avoid processing the same info twice.
1856 const PredicateBase *LastPredInfo = nullptr;
1857 // See if we know something about the comparison itself, like it is the target
1858 // of an assume.
1859 auto *CmpPI = PredInfo->getPredicateInfoFor(V: I);
1860 if (isa_and_nonnull<PredicateAssume>(Val: CmpPI))
1861 return ExprResult::some(
1862 Expr: createConstantExpression(C: ConstantInt::getTrue(Ty: CI->getType())));
1863
1864 if (Op0 == Op1) {
1865 // This condition does not depend on predicates, no need to add users
1866 if (CI->isTrueWhenEqual())
1867 return ExprResult::some(
1868 Expr: createConstantExpression(C: ConstantInt::getTrue(Ty: CI->getType())));
1869 else if (CI->isFalseWhenEqual())
1870 return ExprResult::some(
1871 Expr: createConstantExpression(C: ConstantInt::getFalse(Ty: CI->getType())));
1872 }
1873
1874 // NOTE: Because we are comparing both operands here and below, and using
1875 // previous comparisons, we rely on the fact that predicateinfo knows to mark
1876 // comparisons that use renamed operands as users of the earlier comparisons.
1877 // It is *not* enough to just mark predicateinfo renamed operands as users of
1878 // the earlier comparisons, because the *other* operand may have changed in a
1879 // previous iteration.
1880 // Example:
1881 // icmp slt %a, %b
1882 // %b.0 = ssa.copy(%b)
1883 // false branch:
1884 // icmp slt %c, %b.0
1885
1886 // %c and %a may start out equal, and thus, the code below will say the second
1887 // icmp is false. %c may become equal to something else, and in that case the
1888 // second icmp *must* be reexamined, but would not be if only the renamed
1889 // operands are considered users of the icmp.
1890
1891 // *Currently* we only check one level of comparisons back, and only mark one
1892 // level back as touched when changes happen. If you modify this code to look
1893 // back farther through comparisons, you *must* mark the appropriate
1894 // comparisons as users in PredicateInfo.cpp, or you will cause bugs. See if
1895 // we know something just from the operands themselves.
1896
1897 // See if our operands have predicate info, so that we may be able to derive
1898 // something from a previous comparison.
1899 for (const auto &Op : CI->operands()) {
1900 auto *PI = PredInfo->getPredicateInfoFor(V: Op);
1901 if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(Val: PI)) {
1902 if (PI == LastPredInfo)
1903 continue;
1904 LastPredInfo = PI;
1905 // In phi of ops cases, we may have predicate info that we are evaluating
1906 // in a different context.
1907 if (!DT->dominates(A: PBranch->To, B: I->getParent()))
1908 continue;
1909 // TODO: Along the false edge, we may know more things too, like
1910 // that an icmp of the same operands is false.
1911 //
1912 // TODO: We only handle actual comparison conditions below, not
1913 // and/or.
1914 auto *BranchCond = dyn_cast<CmpInst>(Val: PBranch->Condition);
1915 if (!BranchCond)
1916 continue;
1917 auto *BranchOp0 = lookupOperandLeader(V: BranchCond->getOperand(i_nocapture: 0));
1918 auto *BranchOp1 = lookupOperandLeader(V: BranchCond->getOperand(i_nocapture: 1));
1919 auto BranchPredicate = BranchCond->getPredicate();
1920 if (shouldSwapOperands(BranchOp0, BranchOp1)) {
1921 std::swap(a&: BranchOp0, b&: BranchOp1);
1922 BranchPredicate = BranchCond->getSwappedPredicate();
1923 }
1924 if (BranchOp0 == Op0 && BranchOp1 == Op1) {
1925 if (PBranch->TrueEdge) {
1926 // If we know the previous predicate is true and we are in the true
1927 // edge then we may be implied true or false.
1928 if (CmpInst::isImpliedTrueByMatchingCmp(Pred1: BranchPredicate,
1929 Pred2: OurPredicate)) {
1930 return ExprResult::some(
1931 Expr: createConstantExpression(C: ConstantInt::getTrue(Ty: CI->getType())),
1932 PredDep: PI);
1933 }
1934
1935 if (CmpInst::isImpliedFalseByMatchingCmp(Pred1: BranchPredicate,
1936 Pred2: OurPredicate)) {
1937 return ExprResult::some(
1938 Expr: createConstantExpression(C: ConstantInt::getFalse(Ty: CI->getType())),
1939 PredDep: PI);
1940 }
1941 } else {
1942 // Just handle the ne and eq cases, where if we have the same
1943 // operands, we may know something.
1944 if (BranchPredicate == OurPredicate) {
1945 // Same predicate, same ops, we know it was false, so this is false.
1946 return ExprResult::some(
1947 Expr: createConstantExpression(C: ConstantInt::getFalse(Ty: CI->getType())),
1948 PredDep: PI);
1949 } else if (BranchPredicate ==
1950 CmpInst::getInversePredicate(pred: OurPredicate)) {
1951 // Inverse predicate, we know the other was false, so this is true.
1952 return ExprResult::some(
1953 Expr: createConstantExpression(C: ConstantInt::getTrue(Ty: CI->getType())),
1954 PredDep: PI);
1955 }
1956 }
1957 }
1958 }
1959 }
1960 // createExpression will take care of simplifyCmpInst.
1961 return createExpression(I);
1962}
1963
1964// Substitute and symbolize the instruction before value numbering.
1965NewGVN::ExprResult
1966NewGVN::performSymbolicEvaluation(Instruction *I,
1967 SmallPtrSetImpl<Value *> &Visited) const {
1968
1969 const Expression *E = nullptr;
1970 // TODO: memory intrinsics.
1971 // TODO: Some day, we should do the forward propagation and reassociation
1972 // parts of the algorithm.
1973 switch (I->getOpcode()) {
1974 case Instruction::ExtractValue:
1975 case Instruction::InsertValue:
1976 E = performSymbolicAggrValueEvaluation(I);
1977 break;
1978 case Instruction::PHI: {
1979 SmallVector<ValPair, 3> Ops;
1980 auto *PN = cast<PHINode>(Val: I);
1981 for (unsigned i = 0; i < PN->getNumOperands(); ++i)
1982 Ops.push_back(Elt: {PN->getIncomingValue(i), PN->getIncomingBlock(i)});
1983 // Sort to ensure the invariant createPHIExpression requires is met.
1984 sortPHIOps(Ops);
1985 E = performSymbolicPHIEvaluation(PHIOps: Ops, I, PHIBlock: getBlockForValue(V: I));
1986 } break;
1987 case Instruction::Call:
1988 return performSymbolicCallEvaluation(I);
1989 break;
1990 case Instruction::Store:
1991 E = performSymbolicStoreEvaluation(I);
1992 break;
1993 case Instruction::Load:
1994 E = performSymbolicLoadEvaluation(I);
1995 break;
1996 case Instruction::BitCast:
1997 case Instruction::AddrSpaceCast:
1998 case Instruction::Freeze:
1999 return createExpression(I);
2000 break;
2001 case Instruction::ICmp:
2002 case Instruction::FCmp:
2003 return performSymbolicCmpEvaluation(I);
2004 break;
2005 case Instruction::FNeg:
2006 case Instruction::Add:
2007 case Instruction::FAdd:
2008 case Instruction::Sub:
2009 case Instruction::FSub:
2010 case Instruction::Mul:
2011 case Instruction::FMul:
2012 case Instruction::UDiv:
2013 case Instruction::SDiv:
2014 case Instruction::FDiv:
2015 case Instruction::URem:
2016 case Instruction::SRem:
2017 case Instruction::FRem:
2018 case Instruction::Shl:
2019 case Instruction::LShr:
2020 case Instruction::AShr:
2021 case Instruction::And:
2022 case Instruction::Or:
2023 case Instruction::Xor:
2024 case Instruction::Trunc:
2025 case Instruction::ZExt:
2026 case Instruction::SExt:
2027 case Instruction::FPToUI:
2028 case Instruction::FPToSI:
2029 case Instruction::UIToFP:
2030 case Instruction::SIToFP:
2031 case Instruction::FPTrunc:
2032 case Instruction::FPExt:
2033 case Instruction::PtrToInt:
2034 case Instruction::IntToPtr:
2035 case Instruction::Select:
2036 case Instruction::ExtractElement:
2037 case Instruction::InsertElement:
2038 case Instruction::GetElementPtr:
2039 return createExpression(I);
2040 break;
2041 case Instruction::ShuffleVector:
2042 // FIXME: Add support for shufflevector to createExpression.
2043 return ExprResult::none();
2044 default:
2045 return ExprResult::none();
2046 }
2047 return ExprResult::some(Expr: E);
2048}
2049
2050// Look up a container of values/instructions in a map, and touch all the
2051// instructions in the container. Then erase the key from the map.
2052template <typename Map, typename KeyType>
2053void NewGVN::touchAndErase(Map &M, const KeyType &Key) {
2054 const auto Result = M.find_as(Key);
2055 if (Result != M.end()) {
2056 for (const typename Map::mapped_type::value_type Mapped : Result->second)
2057 TouchedInstructions.set(InstrToDFSNum(Mapped));
2058 M.erase(Result);
2059 }
2060}
2061
2062void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
2063 assert(User && To != User);
2064 if (isa<Instruction>(Val: To))
2065 AdditionalUsers[To].insert(Ptr: User);
2066}
2067
2068void NewGVN::addAdditionalUsers(ExprResult &Res, Instruction *User) const {
2069 if (Res.ExtraDep && Res.ExtraDep != User)
2070 addAdditionalUsers(To: Res.ExtraDep, User);
2071 Res.ExtraDep = nullptr;
2072
2073 if (Res.PredDep) {
2074 if (const auto *PBranch = dyn_cast<PredicateBranch>(Val: Res.PredDep))
2075 PredicateToUsers[PBranch->Condition].insert(Ptr: User);
2076 else if (const auto *PAssume = dyn_cast<PredicateAssume>(Val: Res.PredDep))
2077 PredicateToUsers[PAssume->Condition].insert(Ptr: User);
2078 }
2079 Res.PredDep = nullptr;
2080}
2081
2082void NewGVN::markUsersTouched(Value *V) {
2083 // Now mark the users as touched.
2084 for (auto *User : V->users()) {
2085 assert(isa<Instruction>(User) && "Use of value not within an instruction?");
2086 TouchedInstructions.set(InstrToDFSNum(V: User));
2087 }
2088 touchAndErase(M&: AdditionalUsers, Key: V);
2089}
2090
2091void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
2092 LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
2093 MemoryToUsers[To].insert(Ptr: U);
2094}
2095
2096void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
2097 TouchedInstructions.set(MemoryToDFSNum(MA));
2098}
2099
2100void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) {
2101 if (isa<MemoryUse>(Val: MA))
2102 return;
2103 for (const auto *U : MA->users())
2104 TouchedInstructions.set(MemoryToDFSNum(MA: U));
2105 touchAndErase(M&: MemoryToUsers, Key: MA);
2106}
2107
2108// Touch all the predicates that depend on this instruction.
2109void NewGVN::markPredicateUsersTouched(Instruction *I) {
2110 touchAndErase(M&: PredicateToUsers, Key: I);
2111}
2112
2113// Mark users affected by a memory leader change.
2114void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
2115 for (const auto *M : CC->memory())
2116 markMemoryDefTouched(MA: M);
2117}
2118
2119// Touch the instructions that need to be updated after a congruence class has a
2120// leader change, and mark changed values.
2121void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) {
2122 for (auto *M : *CC) {
2123 if (auto *I = dyn_cast<Instruction>(Val: M))
2124 TouchedInstructions.set(InstrToDFSNum(V: I));
2125 LeaderChanges.insert(Ptr: M);
2126 }
2127}
2128
2129// Given a range of things that have instruction DFS numbers, this will return
2130// the member of the range with the smallest DFS number.
2131template <class T, class Range>
2132T *NewGVN::getMinDFSOfRange(const Range &R) const {
2133 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U};
2134 for (const auto X : R) {
2135 auto DFSNum = InstrToDFSNum(X);
2136 if (DFSNum < MinDFS.second)
2137 MinDFS = {X, DFSNum};
2138 }
2139 return MinDFS.first;
2140}
2141
2142// This function returns the MemoryAccess that should be the next leader of
2143// congruence class CC, under the assumption that the current leader is going to
2144// disappear.
2145const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const {
2146 // TODO: If this ends up too slow, we can maintain a next memory leader like we
2147 // do for regular leaders.
2148 // Make sure there will be a leader to find.
2149 assert(!CC->definesNoMemory() && "Can't get next leader if there is none");
2150 if (CC->getStoreCount() > 0) {
2151 if (auto *NL = dyn_cast_or_null<StoreInst>(Val: CC->getNextLeader().first))
2152 return getMemoryAccess(I: NL);
2153 // Find the store with the minimum DFS number.
2154 auto *V = getMinDFSOfRange<Value>(R: make_filter_range(
2155 Range&: *CC, Pred: [&](const Value *V) { return isa<StoreInst>(Val: V); }));
2156 return getMemoryAccess(I: cast<StoreInst>(Val: V));
2157 }
2158 assert(CC->getStoreCount() == 0);
2159
2160 // Given our assertion, hitting this part must mean
2161 // !CC->memory_empty()
2162 if (CC->memory_size() == 1)
2163 return *CC->memory_begin();
2164 return getMinDFSOfRange<const MemoryPhi>(R: CC->memory());
2165}
2166
2167// This function returns the next value leader of a congruence class, under the
2168// assumption that the current leader is going away. This should end up being
2169// the next most dominating member.
2170Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const {
2171 // We don't need to sort members if there is only 1, and we don't care about
2172 // sorting the TOP class because everything either gets out of it or is
2173 // unreachable.
2174
2175 if (CC->size() == 1 || CC == TOPClass) {
2176 return *(CC->begin());
2177 } else if (CC->getNextLeader().first) {
2178 ++NumGVNAvoidedSortedLeaderChanges;
2179 return CC->getNextLeader().first;
2180 } else {
2181 ++NumGVNSortedLeaderChanges;
2182 // NOTE: If this ends up too slow, we can maintain a dual structure for
2183 // member testing/insertion, or keep things mostly sorted, and sort only
2184 // here, or use SparseBitVector or ....
2185 return getMinDFSOfRange<Value>(R: *CC);
2186 }
2187}
2188
2189// Move a MemoryAccess, currently in OldClass, to NewClass, including updates to
2190// the memory members, etc for the move.
2191//
2192// The invariants of this function are:
2193//
2194// - I must be moving to NewClass from OldClass
2195// - The StoreCount of OldClass and NewClass is expected to have been updated
2196// for I already if it is a store.
2197// - The OldClass memory leader has not been updated yet if I was the leader.
2198void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
2199 MemoryAccess *InstMA,
2200 CongruenceClass *OldClass,
2201 CongruenceClass *NewClass) {
2202 // If the leader is I, and we had a representative MemoryAccess, it should
2203 // be the MemoryAccess of OldClass.
2204 assert((!InstMA || !OldClass->getMemoryLeader() ||
2205 OldClass->getLeader() != I ||
2206 MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) ==
2207 MemoryAccessToClass.lookup(InstMA)) &&
2208 "Representative MemoryAccess mismatch");
2209 // First, see what happens to the new class
2210 if (!NewClass->getMemoryLeader()) {
2211 // Should be a new class, or a store becoming a leader of a new class.
2212 assert(NewClass->size() == 1 ||
2213 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1));
2214 NewClass->setMemoryLeader(InstMA);
2215 // Mark it touched if we didn't just create a singleton
2216 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2217 << NewClass->getID()
2218 << " due to new memory instruction becoming leader\n");
2219 markMemoryLeaderChangeTouched(CC: NewClass);
2220 }
2221 setMemoryClass(From: InstMA, NewClass);
2222 // Now, fixup the old class if necessary
2223 if (OldClass->getMemoryLeader() == InstMA) {
2224 if (!OldClass->definesNoMemory()) {
2225 OldClass->setMemoryLeader(getNextMemoryLeader(CC: OldClass));
2226 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2227 << OldClass->getID() << " to "
2228 << *OldClass->getMemoryLeader()
2229 << " due to removal of old leader " << *InstMA << "\n");
2230 markMemoryLeaderChangeTouched(CC: OldClass);
2231 } else
2232 OldClass->setMemoryLeader(nullptr);
2233 }
2234}
2235
2236// Move a value, currently in OldClass, to be part of NewClass
2237// Update OldClass and NewClass for the move (including changing leaders, etc).
2238void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
2239 CongruenceClass *OldClass,
2240 CongruenceClass *NewClass) {
2241 if (I == OldClass->getNextLeader().first)
2242 OldClass->resetNextLeader();
2243
2244 OldClass->erase(M: I);
2245 NewClass->insert(M: I);
2246
2247 if (NewClass->getLeader() != I)
2248 NewClass->addPossibleNextLeader(LeaderPair: {I, InstrToDFSNum(V: I)});
2249 // Handle our special casing of stores.
2250 if (auto *SI = dyn_cast<StoreInst>(Val: I)) {
2251 OldClass->decStoreCount();
2252 // Okay, so when do we want to make a store a leader of a class?
2253 // If we have a store defined by an earlier load, we want the earlier load
2254 // to lead the class.
2255 // If we have a store defined by something else, we want the store to lead
2256 // the class so everything else gets the "something else" as a value.
2257 // If we have a store as the single member of the class, we want the store
2258 // as the leader
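  // Illustrative example (not from the source): for "store i32 %v, ptr %p"
  // whose value is not known from an earlier load of %p, the store becomes
  // the leader and %v the stored value, so other members value to %v.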
2259 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) {
2260 // If it's a store expression we are using, it means we are not equivalent
2261 // to something earlier.
2262 if (auto *SE = dyn_cast<StoreExpression>(Val: E)) {
2263 NewClass->setStoredValue(SE->getStoredValue());
2264 markValueLeaderChangeTouched(CC: NewClass);
2265 // Shift the new class leader to be the store
2266 LLVM_DEBUG(dbgs() << "Changing leader of congruence class "
2267 << NewClass->getID() << " from "
2268 << *NewClass->getLeader() << " to " << *SI
2269 << " because store joined class\n");
2270 // If we changed the leader, we have to mark it changed because we don't
2271 // know what it will do to symbolic evaluation.
2272 NewClass->setLeader(SI);
2273 }
2274 // We rely on the code below handling the MemoryAccess change.
2275 }
2276 NewClass->incStoreCount();
2277 }
2278 // True if there are no memory instructions left in a class that had memory
2279 // instructions before.
2280
2281 // If it's not a memory use, set the MemoryAccess equivalence
2282 auto *InstMA = dyn_cast_or_null<MemoryDef>(Val: getMemoryAccess(I));
2283 if (InstMA)
2284 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass);
2285 ValueToClass[I] = NewClass;
2286 // See if we destroyed the class or need to swap leaders.
2287 if (OldClass->empty() && OldClass != TOPClass) {
2288 if (OldClass->getDefiningExpr()) {
2289 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
2290 << " from table\n");
2291 // We erase it as an exact expression to make sure we don't just erase an
2292 // equivalent one.
2293 auto Iter = ExpressionToClass.find_as(
2294 Val: ExactEqualsExpression(*OldClass->getDefiningExpr()));
2295 if (Iter != ExpressionToClass.end())
2296 ExpressionToClass.erase(I: Iter);
2297#ifdef EXPENSIVE_CHECKS
2298 assert(
2299 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) &&
2300 "We erased the expression we just inserted, which should not happen");
2301#endif
2302 }
2303 } else if (OldClass->getLeader() == I) {
2304 // When the leader changes, the value numbering of
2305 // everything may change due to symbolization changes, so we need to
2306 // reprocess.
2307 LLVM_DEBUG(dbgs() << "Value class leader change for class "
2308 << OldClass->getID() << "\n");
2309 ++NumGVNLeaderChanges;
2310 // Destroy the stored value if there are no more stores to represent it.
2311 // Note that this is basically clean up for the expression removal that
2312 // happens below. If we remove stores from a class, we may leave it as a
2313 // class of equivalent memory phis.
2314 if (OldClass->getStoreCount() == 0) {
2315 if (OldClass->getStoredValue())
2316 OldClass->setStoredValue(nullptr);
2317 }
2318 OldClass->setLeader(getNextValueLeader(CC: OldClass));
2319 OldClass->resetNextLeader();
2320 markValueLeaderChangeTouched(CC: OldClass);
2321 }
2322}
2323
2324// For a given expression, mark the phi of ops instructions that could have
2325// changed as a result.
2326void NewGVN::markPhiOfOpsChanged(const Expression *E) {
2327 touchAndErase(M&: ExpressionToPhiOfOps, Key: E);
2328}
2329
2330// Perform congruence finding on a given value numbering expression.
2331void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
2332 // This is guaranteed to return something, since it will at least find
2333 // TOP.
2334
2335 CongruenceClass *IClass = ValueToClass.lookup(Val: I);
2336 assert(IClass && "Should have found a IClass");
2337 // Dead classes should have been eliminated from the mapping.
2338 assert(!IClass->isDead() && "Found a dead class");
2339
2340 CongruenceClass *EClass = nullptr;
2341 if (const auto *VE = dyn_cast<VariableExpression>(Val: E)) {
2342 EClass = ValueToClass.lookup(Val: VE->getVariableValue());
2343 } else if (isa<DeadExpression>(Val: E)) {
2344 EClass = TOPClass;
2345 }
2346 if (!EClass) {
2347 auto lookupResult = ExpressionToClass.insert(KV: {E, nullptr});
2348
2349 // If it's not in the value table, create a new congruence class.
2350 if (lookupResult.second) {
2351 CongruenceClass *NewClass = createCongruenceClass(Leader: nullptr, E);
2352 auto place = lookupResult.first;
2353 place->second = NewClass;
2354
2355 // Constants and variables should always be made the leader.
2356 if (const auto *CE = dyn_cast<ConstantExpression>(Val: E)) {
2357 NewClass->setLeader(CE->getConstantValue());
2358 } else if (const auto *SE = dyn_cast<StoreExpression>(Val: E)) {
2359 StoreInst *SI = SE->getStoreInst();
2360 NewClass->setLeader(SI);
2361 NewClass->setStoredValue(SE->getStoredValue());
2362 // The RepMemoryAccess field will be filled in properly by the
2363 // moveValueToNewCongruenceClass call.
2364 } else {
2365 NewClass->setLeader(I);
2366 }
2367 assert(!isa<VariableExpression>(E) &&
2368 "VariableExpression should have been handled already");
2369
2370 EClass = NewClass;
2371 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I
2372 << " using expression " << *E << " at "
2373 << NewClass->getID() << " and leader "
2374 << *(NewClass->getLeader()));
2375 if (NewClass->getStoredValue())
2376 LLVM_DEBUG(dbgs() << " and stored value "
2377 << *(NewClass->getStoredValue()));
2378 LLVM_DEBUG(dbgs() << "\n");
2379 } else {
2380 EClass = lookupResult.first->second;
2381 if (isa<ConstantExpression>(Val: E))
2382 assert((isa<Constant>(EClass->getLeader()) ||
2383 (EClass->getStoredValue() &&
2384 isa<Constant>(EClass->getStoredValue()))) &&
2385 "Any class with a constant expression should have a "
2386 "constant leader");
2387
2388 assert(EClass && "Somehow don't have an eclass");
2389
2390 assert(!EClass->isDead() && "We accidentally looked up a dead class");
2391 }
2392 }
2393 bool ClassChanged = IClass != EClass;
2394 bool LeaderChanged = LeaderChanges.erase(Ptr: I);
2395 if (ClassChanged || LeaderChanged) {
2396 LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression "
2397 << *E << "\n");
2398 if (ClassChanged) {
2399 moveValueToNewCongruenceClass(I, E, OldClass: IClass, NewClass: EClass);
2400 markPhiOfOpsChanged(E);
2401 }
2402
2403 markUsersTouched(V: I);
2404 if (MemoryAccess *MA = getMemoryAccess(I))
2405 markMemoryUsersTouched(MA);
2406 if (auto *CI = dyn_cast<CmpInst>(Val: I))
2407 markPredicateUsersTouched(I: CI);
2408 }
2409 // If we changed the class of the store, we want to ensure nothing finds the
2410 // old store expression. In particular, loads do not compare against the stored
2411 // value, so they will find old store expressions (and associated class
2412 // mappings) if we leave them in the table.
2413 if (ClassChanged && isa<StoreInst>(Val: I)) {
2414 auto *OldE = ValueToExpression.lookup(Val: I);
2415 // It could just be that the old class died. We don't want to erase it if we
2416 // just moved classes.
2417 if (OldE && isa<StoreExpression>(Val: OldE) && *E != *OldE) {
2418 // Erase this as an exact expression to ensure we don't erase expressions
2419 // equivalent to it.
2420 auto Iter = ExpressionToClass.find_as(Val: ExactEqualsExpression(*OldE));
2421 if (Iter != ExpressionToClass.end())
2422 ExpressionToClass.erase(I: Iter);
2423 }
2424 }
2425 ValueToExpression[I] = E;
2426}
2427
2428// Process the fact that Edge (from, to) is reachable, including marking
2429// any newly reachable blocks and instructions for processing.
2430void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) {
2431 // Check if the Edge was reachable before.
2432 if (ReachableEdges.insert(V: {From, To}).second) {
2433 // If this block wasn't reachable before, all instructions are touched.
2434 if (ReachableBlocks.insert(Ptr: To).second) {
2435 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2436 << " marked reachable\n");
2437 const auto &InstRange = BlockInstRange.lookup(Val: To);
2438 TouchedInstructions.set(I: InstRange.first, E: InstRange.second);
2439 } else {
2440 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2441 << " was reachable, but new edge {"
2442 << getBlockName(From) << "," << getBlockName(To)
2443 << "} to it found\n");
2444
2445 // We've made an edge reachable to an existing block, which may
2446 // impact predicates. Otherwise, only mark the phi nodes as touched, as
2447 // they are the only things that depend on new edges. Anything using their
2448 // values will get propagated to if necessary.
2449 if (MemoryAccess *MemPhi = getMemoryAccess(BB: To))
2450 TouchedInstructions.set(InstrToDFSNum(MA: MemPhi));
2451
2452 // FIXME: We should just add a union op on a Bitvector and
2453 // SparseBitVector. We can do it word by word faster than we are doing it
2454 // here.
2455 for (auto InstNum : RevisitOnReachabilityChange[To])
2456 TouchedInstructions.set(InstNum);
2457 }
2458 }
2459}
2460
2461// Given a predicate condition (from a switch, cmp, or whatever), see if we
2462// already know some constant value for it.
2463Value *NewGVN::findConditionEquivalence(Value *Cond) const {
2464 auto Result = lookupOperandLeader(V: Cond);
2465 return isa<Constant>(Val: Result) ? Result : nullptr;
2466}
2467
2468// Process the outgoing edges of a block for reachability.
2469void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) {
2470 // Evaluate reachability of terminator instruction.
2471 Value *Cond;
2472 BasicBlock *TrueSucc, *FalseSucc;
2473 if (match(V: TI, P: m_Br(C: m_Value(V&: Cond), T&: TrueSucc, F&: FalseSucc))) {
2474 Value *CondEvaluated = findConditionEquivalence(Cond);
2475 if (!CondEvaluated) {
2476 if (auto *I = dyn_cast<Instruction>(Val: Cond)) {
2477 SmallPtrSet<Value *, 4> Visited;
2478 auto Res = performSymbolicEvaluation(I, Visited);
2479 if (const auto *CE = dyn_cast_or_null<ConstantExpression>(Val: Res.Expr)) {
2480 CondEvaluated = CE->getConstantValue();
2481 addAdditionalUsers(Res, User: I);
2482 } else {
2483 // Did not use simplification result, no need to add the extra
2484 // dependency.
2485 Res.ExtraDep = nullptr;
2486 }
2487 } else if (isa<ConstantInt>(Val: Cond)) {
2488 CondEvaluated = Cond;
2489 }
2490 }
2491 ConstantInt *CI;
2492 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(Val: CondEvaluated))) {
2493 if (CI->isOne()) {
2494 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2495 << " evaluated to true\n");
2496 updateReachableEdge(From: B, To: TrueSucc);
2497 } else if (CI->isZero()) {
2498 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2499 << " evaluated to false\n");
2500 updateReachableEdge(From: B, To: FalseSucc);
2501 }
2502 } else {
2503 updateReachableEdge(From: B, To: TrueSucc);
2504 updateReachableEdge(From: B, To: FalseSucc);
2505 }
2506 } else if (auto *SI = dyn_cast<SwitchInst>(Val: TI)) {
2507 // For switches, propagate the case values into the case
2508 // destinations.
2509
2510 Value *SwitchCond = SI->getCondition();
2511 Value *CondEvaluated = findConditionEquivalence(Cond: SwitchCond);
2512 // See if we were able to turn this switch statement into a constant.
2513 if (CondEvaluated && isa<ConstantInt>(Val: CondEvaluated)) {
2514 auto *CondVal = cast<ConstantInt>(Val: CondEvaluated);
2515 // We should be able to get case value for this.
2516 auto Case = *SI->findCaseValue(C: CondVal);
2517 if (Case.getCaseSuccessor() == SI->getDefaultDest()) {
2518 // We proved the value is outside of the range of the case.
2519 // We can't do anything other than mark the default dest as reachable,
2520 // and go home.
2521 updateReachableEdge(From: B, To: SI->getDefaultDest());
2522 return;
2523 }
2524 // Now get where it goes and mark it reachable.
2525 BasicBlock *TargetBlock = Case.getCaseSuccessor();
2526 updateReachableEdge(From: B, To: TargetBlock);
2527 } else {
2528 for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
2529 BasicBlock *TargetBlock = SI->getSuccessor(idx: i);
2530 updateReachableEdge(From: B, To: TargetBlock);
2531 }
2532 }
2533 } else {
2534 // Otherwise this is either unconditional, or a type we have no
2535 // idea about. Just mark successors as reachable.
2536 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
2537 BasicBlock *TargetBlock = TI->getSuccessor(Idx: i);
2538 updateReachableEdge(From: B, To: TargetBlock);
2539 }
2540
2541 // This also may be a memory defining terminator, in which case, set it
2542 // equivalent only to itself.
2543 //
2544 auto *MA = getMemoryAccess(I: TI);
2545 if (MA && !isa<MemoryUse>(Val: MA)) {
2546 auto *CC = ensureLeaderOfMemoryClass(MA);
2547 if (setMemoryClass(From: MA, NewClass: CC))
2548 markMemoryUsersTouched(MA);
2549 }
2550 }
2551}
2552
2553// Remove the PHI of Ops PHI for I
2554void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) {
2555 InstrDFS.erase(Val: PHITemp);
2556 // It's still a temp instruction. We keep it in the array so it gets erased.
2557 // However, it's no longer used by I, or in the block
2558 TempToBlock.erase(Val: PHITemp);
2559 RealToTemp.erase(Val: I);
2560 // We don't remove the users from the phi node uses. This wastes a little
2561 // time, but such is life. We could use two sets to track which were there
2562 // at the start of NewGVN, and which were added, but right now the cost of
2563 // tracking is more than the cost of checking for more phi of ops.
2564}
2565
2566// Add PHI Op in BB as a PHI of operations version of ExistingValue.
2567void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB,
2568 Instruction *ExistingValue) {
2569 InstrDFS[Op] = InstrToDFSNum(V: ExistingValue);
2570 AllTempInstructions.insert(V: Op);
2571 TempToBlock[Op] = BB;
2572 RealToTemp[ExistingValue] = Op;
2573 // Add all users to the phi node's uses, as they are now uses of the phi of
2574 // ops phis and may themselves be phi of ops.
2575 for (auto *U : ExistingValue->users())
2576 if (auto *UI = dyn_cast<Instruction>(Val: U))
2577 PHINodeUses.insert(Ptr: UI);
2578}
2579
2580static bool okayForPHIOfOps(const Instruction *I) {
2581 if (!EnablePhiOfOps)
2582 return false;
2583 return isa<BinaryOperator>(Val: I) || isa<SelectInst>(Val: I) || isa<CmpInst>(Val: I) ||
2584 isa<LoadInst>(Val: I);
2585}
2586
2587// Return true if this operand will be safe to use for phi of ops.
2588//
2589// The reason some operands are unsafe is that we are not trying to recursively
2590// translate everything back through phi nodes. We actually expect some lookups
2591// of expressions to fail, in particular lookups where the expression cannot
2592// exist in the predecessor. This is true even if the expression, as shown, can
2593// be determined to be constant.
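// Illustrative example (not from the source): an operand that is itself a
// PHI in PHIBlock, or that reads memory a store in the loop might clobber,
// is unsafe: its translated expression may simply not exist in the
// predecessor.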
2594bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock,
2595 SmallPtrSetImpl<const Value *> &Visited) {
2596 SmallVector<Value *, 4> Worklist;
2597 Worklist.push_back(Elt: V);
2598 while (!Worklist.empty()) {
2599 auto *I = Worklist.pop_back_val();
2600 if (!isa<Instruction>(Val: I))
2601 continue;
2602
2603 auto OISIt = OpSafeForPHIOfOps.find(Val: I);
2604 if (OISIt != OpSafeForPHIOfOps.end())
2605 return OISIt->second;
2606
2607 // Keep walking until we either dominate the phi block, or hit a phi, or run
2608 // out of things to check.
2609 if (DT->properlyDominates(A: getBlockForValue(V: I), B: PHIBlock)) {
2610 OpSafeForPHIOfOps.insert(KV: {I, true});
2611 continue;
2612 }
2613 // PHI in the same block.
2614 if (isa<PHINode>(Val: I) && getBlockForValue(V: I) == PHIBlock) {
2615 OpSafeForPHIOfOps.insert(KV: {I, false});
2616 return false;
2617 }
2618
2619 auto *OrigI = cast<Instruction>(Val: I);
2620 // When we hit an instruction that reads memory (load, call, etc), we must
2621 // consider any store that may happen in the loop. For now, we assume the
2622 // worst: there is a store in the loop that aliases with this read.
2623 // The case where the load is outside the loop is already covered by the
2624 // dominator check above.
2625 // TODO: relax this condition
2626 if (OrigI->mayReadFromMemory())
2627 return false;
2628
2629 // Check the operands of the current instruction.
2630 for (auto *Op : OrigI->operand_values()) {
2631 if (!isa<Instruction>(Val: Op))
2632 continue;
2633 // Stop now if we find an unsafe operand.
2634 auto OISIt = OpSafeForPHIOfOps.find(Val: OrigI);
2635 if (OISIt != OpSafeForPHIOfOps.end()) {
2636 if (!OISIt->second) {
2637 OpSafeForPHIOfOps.insert(KV: {I, false});
2638 return false;
2639 }
2640 continue;
2641 }
2642 if (!Visited.insert(Ptr: Op).second)
2643 continue;
2644 Worklist.push_back(Elt: cast<Instruction>(Val: Op));
2645 }
2646 }
2647 OpSafeForPHIOfOps.insert(KV: {V, true});
2648 return true;
2649}
2650
2651// Try to find a leader for instruction TransInst, which is a phi translated
2652// version of something in our original program. Visited is used to ensure we
2653// don't infinite loop during translations of cycles. OrigInst is the
2654// instruction in the original program, and PredBB is the predecessor we
2655// translated it through.
2656Value *NewGVN::findLeaderForInst(Instruction *TransInst,
2657 SmallPtrSetImpl<Value *> &Visited,
2658 MemoryAccess *MemAccess, Instruction *OrigInst,
2659 BasicBlock *PredBB) {
2660 unsigned IDFSNum = InstrToDFSNum(V: OrigInst);
2661 // Make sure it's marked as a temporary instruction.
2662 AllTempInstructions.insert(V: TransInst);
2663 // and make sure anything that tries to add its DFS number is
2664 // redirected to the instruction we are making a phi of ops
2665 // for.
2666 TempToBlock.insert(KV: {TransInst, PredBB});
2667 InstrDFS.insert(KV: {TransInst, IDFSNum});
2668
2669 auto Res = performSymbolicEvaluation(I: TransInst, Visited);
2670 const Expression *E = Res.Expr;
2671 addAdditionalUsers(Res, User: OrigInst);
2672 InstrDFS.erase(Val: TransInst);
2673 AllTempInstructions.erase(V: TransInst);
2674 TempToBlock.erase(Val: TransInst);
2675 if (MemAccess)
2676 TempToMemory.erase(Val: TransInst);
2677 if (!E)
2678 return nullptr;
2679 auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB);
2680 if (!FoundVal) {
2681 ExpressionToPhiOfOps[E].insert(Ptr: OrigInst);
2682 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
2683 << " in block " << getBlockName(PredBB) << "\n");
2684 return nullptr;
2685 }
2686 if (auto *SI = dyn_cast<StoreInst>(Val: FoundVal))
2687 FoundVal = SI->getValueOperand();
2688 return FoundVal;
2689}
2690
2691// When we see an instruction that is an op of phis, generate the equivalent phi
2692// of ops form.
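// As a rough, illustrative sketch (hypothetical IR): given
//   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
//   %r = add i32 %p, 1
// we clone the add, phi-translate its operands into each predecessor
// (%a + 1 for %bb1, %b + 1 for %bb2), and look for existing leaders of the
// translated expressions. If every reachable incoming edge yields one, we
// build a temporary
//   %phiofops = phi i32 [ <leader of %a + 1>, %bb1 ], [ <leader of %b + 1>, %bb2 ]
// and use its symbolic evaluation as the expression for %r.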
2693const Expression *
2694NewGVN::makePossiblePHIOfOps(Instruction *I,
2695 SmallPtrSetImpl<Value *> &Visited) {
2696 if (!okayForPHIOfOps(I))
2697 return nullptr;
2698
2699 if (!Visited.insert(Ptr: I).second)
2700 return nullptr;
2701 // For now, we require the instruction be cycle free because we don't
2702 // *always* create a phi of ops for instructions that could be done as phi
2703 // of ops, we only do it if we think it is useful. If we did do it all the
2704 // time, we could remove the cycle free check.
2705 if (!isCycleFree(I))
2706 return nullptr;
2707
2708 SmallPtrSet<const Value *, 8> ProcessedPHIs;
2709 // TODO: We don't do phi translation on memory accesses because it's
2710 // complicated. For a load, we'd need to be able to simulate a new memoryuse,
2711 // which we don't have a good way of doing ATM.
2712 auto *MemAccess = getMemoryAccess(I);
2713 // If the memory operation is defined by a memory operation in this block that
2714 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi
2715 // can't help, as it would still be killed by that memory operation.
2716 if (MemAccess && !isa<MemoryPhi>(Val: MemAccess->getDefiningAccess()) &&
2717 MemAccess->getDefiningAccess()->getBlock() == I->getParent())
2718 return nullptr;
2719
2720 // Convert op of phis to phi of ops
2721 SmallPtrSet<const Value *, 10> VisitedOps;
2722 SmallVector<Value *, 4> Ops(I->operand_values());
2723 BasicBlock *SamePHIBlock = nullptr;
2724 PHINode *OpPHI = nullptr;
2725 if (!DebugCounter::shouldExecute(CounterName: PHIOfOpsCounter))
2726 return nullptr;
2727 for (auto *Op : Ops) {
2728 if (!isa<PHINode>(Val: Op)) {
2729 auto *ValuePHI = RealToTemp.lookup(Val: Op);
2730 if (!ValuePHI)
2731 continue;
2732 LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
2733 Op = ValuePHI;
2734 }
2735 OpPHI = cast<PHINode>(Val: Op);
2736 if (!SamePHIBlock) {
2737 SamePHIBlock = getBlockForValue(V: OpPHI);
2738 } else if (SamePHIBlock != getBlockForValue(V: OpPHI)) {
2739 LLVM_DEBUG(
2740 dbgs()
2741 << "PHIs for operands are not all in the same block, aborting\n");
2742 return nullptr;
2743 }
2744 // No point in doing this for one-operand phis.
2745 // Since all PHIs for operands must be in the same block, they must have
2746 // the same number of operands, so we can just abort.
2747 if (OpPHI->getNumOperands() == 1)
2748 return nullptr;
2749 }
2750
2751 if (!OpPHI)
2752 return nullptr;
2753
2754 SmallVector<ValPair, 4> PHIOps;
2755 SmallPtrSet<Value *, 4> Deps;
2756 auto *PHIBlock = getBlockForValue(V: OpPHI);
2757 RevisitOnReachabilityChange[PHIBlock].reset(Idx: InstrToDFSNum(V: I));
2758 for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) {
2759 auto *PredBB = OpPHI->getIncomingBlock(i: PredNum);
2760 Value *FoundVal = nullptr;
2761 SmallPtrSet<Value *, 4> CurrentDeps;
2762 // We could just skip unreachable edges entirely but it's tricky to do
2763 // with rewriting existing phi nodes.
2764 if (ReachableEdges.count(V: {PredBB, PHIBlock})) {
2765 // Clone the instruction, create an expression from it that is
2766 // translated back into the predecessor, and see if we have a leader.
2767 Instruction *ValueOp = I->clone();
2768 // Emit the temporary instruction in the predecessor basic block where the
2769 // corresponding value is defined.
2770 ValueOp->insertBefore(InsertPos: PredBB->getTerminator());
2771 if (MemAccess)
2772 TempToMemory.insert(KV: {ValueOp, MemAccess});
2773 bool SafeForPHIOfOps = true;
2774 VisitedOps.clear();
2775 for (auto &Op : ValueOp->operands()) {
2776 auto *OrigOp = &*Op;
2777 // When these operands change, it could change whether there is a
2778 // leader for us or not, so we have to add additional users.
2779 if (isa<PHINode>(Val: Op)) {
2780 Op = Op->DoPHITranslation(CurBB: PHIBlock, PredBB);
2781 if (Op != OrigOp && Op != I)
2782 CurrentDeps.insert(Ptr: Op);
2783 } else if (auto *ValuePHI = RealToTemp.lookup(Val: Op)) {
2784 if (getBlockForValue(V: ValuePHI) == PHIBlock)
2785 Op = ValuePHI->getIncomingValueForBlock(BB: PredBB);
2786 }
2787 // If we phi-translated the op, it must be safe.
2788 SafeForPHIOfOps =
2789 SafeForPHIOfOps &&
2790 (Op != OrigOp || OpIsSafeForPHIOfOps(V: Op, PHIBlock, Visited&: VisitedOps));
2791 }
2792 // FIXME: For those things that are not safe we could generate
2793 // expressions all the way down, and see if this comes out to a
2794 // constant. For anything where that is true, and unsafe, we should
2795 // have made a phi-of-ops (or value numbered it equivalent to something)
2796 // for the pieces already.
2797 FoundVal = !SafeForPHIOfOps ? nullptr
2798 : findLeaderForInst(TransInst: ValueOp, Visited,
2799 MemAccess, OrigInst: I, PredBB);
2800 ValueOp->eraseFromParent();
2801 if (!FoundVal) {
2802 // We failed to find a leader for the current ValueOp, but this might
2803 // change if the translated operands change.
2804 if (SafeForPHIOfOps)
2805 for (auto *Dep : CurrentDeps)
2806 addAdditionalUsers(To: Dep, User: I);
2807
2808 return nullptr;
2809 }
2810 Deps.insert(I: CurrentDeps.begin(), E: CurrentDeps.end());
2811 } else {
2812 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2813 << getBlockName(PredBB)
2814 << " because the block is unreachable\n");
2815 FoundVal = PoisonValue::get(T: I->getType());
2816 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(V: I));
2817 }
2818
2819 PHIOps.push_back(Elt: {FoundVal, PredBB});
2820 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
2821 << getBlockName(PredBB) << "\n");
2822 }
2823 for (auto *Dep : Deps)
2824 addAdditionalUsers(To: Dep, User: I);
2825 sortPHIOps(Ops: PHIOps);
2826 auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock);
2827 if (isa<ConstantExpression>(Val: E) || isa<VariableExpression>(Val: E)) {
2828 LLVM_DEBUG(
2829 dbgs()
2830 << "Not creating real PHI of ops because it simplified to existing "
2831 "value or constant\n");
2832 // We have leaders for all operands, but do not create a real PHI node with
2833 // those leaders as operands, so the link between the operands and the
2834 // PHI-of-ops is not materialized in the IR. If any of those leaders
2835 // changes, the PHI-of-ops may also change, so we need to add the operands as
2836 // additional users.
2837 for (auto &O : PHIOps)
2838 addAdditionalUsers(To: O.first, User: I);
2839
2840 return E;
2841 }
2842 auto *ValuePHI = RealToTemp.lookup(Val: I);
2843 bool NewPHI = false;
2844 if (!ValuePHI) {
2845 ValuePHI =
2846 PHINode::Create(Ty: I->getType(), NumReservedValues: OpPHI->getNumOperands(), NameStr: "phiofops");
2847 addPhiOfOps(Op: ValuePHI, BB: PHIBlock, ExistingValue: I);
2848 NewPHI = true;
2849 NumGVNPHIOfOpsCreated++;
2850 }
2851 if (NewPHI) {
2852 for (auto PHIOp : PHIOps)
2853 ValuePHI->addIncoming(V: PHIOp.first, BB: PHIOp.second);
2854 } else {
2855 TempToBlock[ValuePHI] = PHIBlock;
2856 unsigned int i = 0;
2857 for (auto PHIOp : PHIOps) {
2858 ValuePHI->setIncomingValue(i, V: PHIOp.first);
2859 ValuePHI->setIncomingBlock(i, BB: PHIOp.second);
2860 ++i;
2861 }
2862 }
2863 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(V: I));
2864 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
2865 << "\n");
2866
2867 return E;
2868}
2869
2870// The algorithm initially places the values of the routine in the TOP
2871// congruence class. The leader of TOP is the undetermined value `poison`.
2872// When the algorithm has finished, values still in TOP are unreachable.
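// For example, an instruction in a block that is never proven reachable is
// never moved out of TOP; the elimination phase later checks that everything
// remaining in TOP is unreachable or dead (see eliminateInstructions).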
2873void NewGVN::initializeCongruenceClasses(Function &F) {
2874 NextCongruenceNum = 0;
2875
2876 // Note that even though we use the live on entry def as a representative
2877 // MemoryAccess, it is *not* the same as the actual live on entry def. We
2878 // have no real equivalent to poison for MemoryAccesses, and so we really
2879 // should be checking whether the MemoryAccess is top if we want to know if it
2880 // is equivalent to everything. Otherwise, what this really signifies is that
2881 // the access reaches all the way back to the beginning of the function.
2882
2883 // Initialize all other instructions to be in TOP class.
2884 TOPClass = createCongruenceClass(Leader: nullptr, E: nullptr);
2885 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2886 // The live on entry def gets put into its own class.
2887 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2888 createMemoryClass(MA: MSSA->getLiveOnEntryDef());
2889
2890 for (auto *DTN : nodes(G: DT)) {
2891 BasicBlock *BB = DTN->getBlock();
2892 // All MemoryAccesses are equivalent to live on entry to start. They must
2893 // be initialized to something so that initial changes are noticed. For
2894 // the maximal answer, we initialize them all to be the same as
2895 // liveOnEntry.
2896 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
2897 if (MemoryBlockDefs)
2898 for (const auto &Def : *MemoryBlockDefs) {
2899 MemoryAccessToClass[&Def] = TOPClass;
2900 auto *MD = dyn_cast<MemoryDef>(Val: &Def);
2901 // Insert the memory phis into the member list.
2902 if (!MD) {
2903 const MemoryPhi *MP = cast<MemoryPhi>(Val: &Def);
2904 TOPClass->memory_insert(M: MP);
2905 MemoryPhiState.insert(KV: {MP, MPS_TOP});
2906 }
2907
2908 if (MD && isa<StoreInst>(Val: MD->getMemoryInst()))
2909 TOPClass->incStoreCount();
2910 }
2911
2912 // FIXME: This is trying to discover which instructions are uses of phi
2913 // nodes. We should move this into one of the myriad of places that walk
2914 // all the operands already.
2915 for (auto &I : *BB) {
2916 if (isa<PHINode>(Val: &I))
2917 for (auto *U : I.users())
2918 if (auto *UInst = dyn_cast<Instruction>(Val: U))
2919 if (InstrToDFSNum(V: UInst) != 0 && okayForPHIOfOps(I: UInst))
2920 PHINodeUses.insert(Ptr: UInst);
2921 // Don't insert void terminators into the class. We don't value number
2922 // them, and they just end up sitting in TOP.
2923 if (I.isTerminator() && I.getType()->isVoidTy())
2924 continue;
2925 TOPClass->insert(M: &I);
2926 ValueToClass[&I] = TOPClass;
2927 }
2928 }
2929
2930 // Initialize arguments to be in their own unique congruence classes
2931 for (auto &FA : F.args())
2932 createSingletonCongruenceClass(Member: &FA);
2933}
2934
2935void NewGVN::cleanupTables() {
2936 for (CongruenceClass *&CC : CongruenceClasses) {
2937 LLVM_DEBUG(dbgs() << "Congruence class " << CC->getID() << " has "
2938 << CC->size() << " members\n");
2939 // Make sure we delete the congruence class (probably worth switching to
2940 // a unique_ptr at some point).
2941 delete CC;
2942 CC = nullptr;
2943 }
2944
2945 // Destroy the value expressions
2946 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(),
2947 AllTempInstructions.end());
2948 AllTempInstructions.clear();
2949
2950 // We have to drop all references for everything first, so there are no uses
2951 // left as we delete them.
2952 for (auto *I : TempInst) {
2953 I->dropAllReferences();
2954 }
2955
2956 while (!TempInst.empty()) {
2957 auto *I = TempInst.pop_back_val();
2958 I->deleteValue();
2959 }
2960
2961 ValueToClass.clear();
2962 ArgRecycler.clear(ExpressionAllocator);
2963 ExpressionAllocator.Reset();
2964 CongruenceClasses.clear();
2965 ExpressionToClass.clear();
2966 ValueToExpression.clear();
2967 RealToTemp.clear();
2968 AdditionalUsers.clear();
2969 ExpressionToPhiOfOps.clear();
2970 TempToBlock.clear();
2971 TempToMemory.clear();
2972 PHINodeUses.clear();
2973 OpSafeForPHIOfOps.clear();
2974 ReachableBlocks.clear();
2975 ReachableEdges.clear();
2976#ifndef NDEBUG
2977 ProcessedCount.clear();
2978#endif
2979 InstrDFS.clear();
2980 InstructionsToErase.clear();
2981 DFSToInstr.clear();
2982 BlockInstRange.clear();
2983 TouchedInstructions.clear();
2984 MemoryAccessToClass.clear();
2985 PredicateToUsers.clear();
2986 MemoryToUsers.clear();
2987 RevisitOnReachabilityChange.clear();
2988 IntrinsicInstPred.clear();
2989}
2990
2991// Assign local DFS number mapping to instructions, and leave space for Value
2992 // PHIs.
2993std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B,
2994 unsigned Start) {
2995 unsigned End = Start;
2996 if (MemoryAccess *MemPhi = getMemoryAccess(BB: B)) {
2997 InstrDFS[MemPhi] = End++;
2998 DFSToInstr.emplace_back(Args&: MemPhi);
2999 }
3000
3001 // Then the real block goes next.
3002 for (auto &I : *B) {
3003 // There's no need to call isInstructionTriviallyDead more than once on
3004 // an instruction. Therefore, once we know that an instruction is dead
3005 // we change its DFS number so that it doesn't get value numbered.
3006 if (isInstructionTriviallyDead(I: &I, TLI)) {
3007 InstrDFS[&I] = 0;
3008 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
3009 markInstructionForDeletion(&I);
3010 continue;
3011 }
3012 if (isa<PHINode>(Val: &I))
3013 RevisitOnReachabilityChange[B].set(End);
3014 InstrDFS[&I] = End++;
3015 DFSToInstr.emplace_back(Args: &I);
3016 }
3017
3018 // All of the range functions take half-open ranges (open on the end side).
3019 // So we do not subtract one from count, because at this point it is one
3020 // greater than the last instruction.
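// For example (hypothetical counts): a block with a MemoryPhi and three live
// instructions, numbered from Start = 5, assigns 5 to the MemoryPhi and
// 6, 7, 8 to the instructions, and returns the half-open range (5, 9).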
3021 return std::make_pair(x&: Start, y&: End);
3022}
3023
3024void NewGVN::updateProcessedCount(const Value *V) {
3025#ifndef NDEBUG
3026 if (ProcessedCount.count(Val: V) == 0) {
3027 ProcessedCount.insert(KV: {V, 1});
3028 } else {
3029 ++ProcessedCount[V];
3030 assert(ProcessedCount[V] < 100 &&
3031 "Seem to have processed the same Value a lot");
3032 }
3033#endif
3034}
3035
3036// Evaluate MemoryPhi nodes symbolically, just like PHI nodes
3037void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
3038 // If all the arguments are the same, the MemoryPhi has the same value as the
3039 // argument. Filter out unreachable blocks and self phis from our operands.
3040 // TODO: We could do cycle-checking on the memory phis to allow valueizing for
3041 // self-phi checking.
3042 const BasicBlock *PHIBlock = MP->getBlock();
3043 auto Filtered = make_filter_range(Range: MP->operands(), Pred: [&](const Use &U) {
3044 return cast<MemoryAccess>(Val: U) != MP &&
3045 !isMemoryAccessTOP(MA: cast<MemoryAccess>(Val: U)) &&
3046 ReachableEdges.count(V: {MP->getIncomingBlock(U), PHIBlock});
3047 });
3048 // If all that is left is nothing, our MemoryPhi is poison. We keep it in
3049 // TOPClass. Note: The only case in which this should happen is if we have
3050 // at least one self-argument.
3051 if (Filtered.begin() == Filtered.end()) {
3052 if (setMemoryClass(From: MP, NewClass: TOPClass))
3053 markMemoryUsersTouched(MA: MP);
3054 return;
3055 }
3056
3057 // Transform the remaining operands into operand leaders.
3058 // FIXME: mapped_iterator should have a range version.
3059 auto LookupFunc = [&](const Use &U) {
3060 return lookupMemoryLeader(MA: cast<MemoryAccess>(Val: U));
3061 };
3062 auto MappedBegin = map_iterator(I: Filtered.begin(), F: LookupFunc);
3063 auto MappedEnd = map_iterator(I: Filtered.end(), F: LookupFunc);
3064
3065 // and now check if all the elements are equal.
3066 // Sadly, we can't use std::equal since these are random access iterators.
3067 const auto *AllSameValue = *MappedBegin;
3068 ++MappedBegin;
3069 bool AllEqual = std::all_of(
3070 first: MappedBegin, last: MappedEnd,
3071 pred: [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
3072
3073 if (AllEqual)
3074 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
3075 << "\n");
3076 else
3077 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
3078 // If it's equal to something, it's in that class. Otherwise, it has to be in
3079 // a class where it is the leader (other things may be equivalent to it, but
3080 // it needs to start off in its own class, which means it must have been the
3081 // leader, and it can't have stopped being the leader because it was never
3082 // removed).
3083 CongruenceClass *CC =
3084 AllEqual ? getMemoryClass(MA: AllSameValue) : ensureLeaderOfMemoryClass(MA: MP);
3085 auto OldState = MemoryPhiState.lookup(Val: MP);
3086 assert(OldState != MPS_Invalid && "Invalid memory phi state");
3087 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique;
3088 MemoryPhiState[MP] = NewState;
3089 if (setMemoryClass(From: MP, NewClass: CC) || OldState != NewState)
3090 markMemoryUsersTouched(MA: MP);
3091}
3092
3093// Value number a single instruction, symbolically evaluating, performing
3094// congruence finding, and updating mappings.
3095void NewGVN::valueNumberInstruction(Instruction *I) {
3096 LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n");
3097 if (!I->isTerminator()) {
3098 const Expression *Symbolized = nullptr;
3099 SmallPtrSet<Value *, 2> Visited;
3100 if (DebugCounter::shouldExecute(CounterName: VNCounter)) {
3101 auto Res = performSymbolicEvaluation(I, Visited);
3102 Symbolized = Res.Expr;
3103 addAdditionalUsers(Res, User: I);
3104
3105 // Make a phi of ops if necessary
3106 if (Symbolized && !isa<ConstantExpression>(Val: Symbolized) &&
3107 !isa<VariableExpression>(Val: Symbolized) && PHINodeUses.count(Ptr: I)) {
3108 auto *PHIE = makePossiblePHIOfOps(I, Visited);
3109 // If we created a phi of ops, use it.
3110 // If we couldn't create one, make sure we don't leave one lying around
3111 if (PHIE) {
3112 Symbolized = PHIE;
3113 } else if (auto *Op = RealToTemp.lookup(Val: I)) {
3114 removePhiOfOps(I, PHITemp: Op);
3115 }
3116 }
3117 } else {
3118 // Mark the instruction as unused so we don't value number it again.
3119 InstrDFS[I] = 0;
3120 }
3121 // If we couldn't come up with a symbolic expression, use the unknown
3122 // expression
3123 if (Symbolized == nullptr)
3124 Symbolized = createUnknownExpression(I);
3125 performCongruenceFinding(I, E: Symbolized);
3126 } else {
3127 // Handle terminators that return values. All of them produce values we
3128 // don't currently understand. We don't place non-value producing
3129 // terminators in a class.
3130 if (!I->getType()->isVoidTy()) {
3131 auto *Symbolized = createUnknownExpression(I);
3132 performCongruenceFinding(I, E: Symbolized);
3133 }
3134 processOutgoingEdges(TI: I, B: I->getParent());
3135 }
3136}
3137
3138// Check if there is a path, using single or equal argument phi nodes, from
3139// First to Second.
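// For example (illustrative): if First is a MemoryPhi whose reachable incoming
// values are all the same access M, we recurse on M; if walking the optimized
// def chain from M reaches Second, such a path exists.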
3140bool NewGVN::singleReachablePHIPath(
3141 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First,
3142 const MemoryAccess *Second) const {
3143 if (First == Second)
3144 return true;
3145 if (MSSA->isLiveOnEntryDef(MA: First))
3146 return false;
3147
3148 // This is not perfect, but as we're just verifying here, we can live with
3149 // the loss of precision. The real solution would be that of doing strongly
3150 // connected component finding in this routine, and it's probably not worth
3151 // the complexity for the time being. So, we just keep a set of visited
3152 // MemoryAccess and return true when we hit a cycle.
3153 if (!Visited.insert(Ptr: First).second)
3154 return true;
3155
3156 const auto *EndDef = First;
3157 for (const auto *ChainDef : optimized_def_chain(MA: First)) {
3158 if (ChainDef == Second)
3159 return true;
3160 if (MSSA->isLiveOnEntryDef(MA: ChainDef))
3161 return false;
3162 EndDef = ChainDef;
3163 }
3164 auto *MP = cast<MemoryPhi>(Val: EndDef);
3165 auto ReachableOperandPred = [&](const Use &U) {
3166 return ReachableEdges.count(V: {MP->getIncomingBlock(U), MP->getBlock()});
3167 };
3168 auto FilteredPhiArgs =
3169 make_filter_range(Range: MP->operands(), Pred: ReachableOperandPred);
3170 SmallVector<const Value *, 32> OperandList;
3171 llvm::copy(Range&: FilteredPhiArgs, Out: std::back_inserter(x&: OperandList));
3172 bool Okay = all_equal(Range&: OperandList);
3173 if (Okay)
3174 return singleReachablePHIPath(Visited, First: cast<MemoryAccess>(Val: OperandList[0]),
3175 Second);
3176 return false;
3177}
3178
3179 // Verify that the memory equivalence table makes sense relative to the
3180// congruence classes. Note that this checking is not perfect, and is currently
3181// subject to very rare false negatives. It is only useful for
3182// testing/debugging.
3183void NewGVN::verifyMemoryCongruency() const {
3184#ifndef NDEBUG
3185 // Verify that the memory table equivalence and memory member set match
3186 for (const auto *CC : CongruenceClasses) {
3187 if (CC == TOPClass || CC->isDead())
3188 continue;
3189 if (CC->getStoreCount() != 0) {
3190 assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
3191 "Any class with a store as a leader should have a "
3192 "representative stored value");
3193 assert(CC->getMemoryLeader() &&
3194 "Any congruence class with a store should have a "
3195 "representative access");
3196 }
3197
3198 if (CC->getMemoryLeader())
3199 assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
3200 "Representative MemoryAccess does not appear to be reverse "
3201 "mapped properly");
3202 for (const auto *M : CC->memory())
3203 assert(MemoryAccessToClass.lookup(M) == CC &&
3204 "Memory member does not appear to be reverse mapped properly");
3205 }
3206
3207 // Anything equivalent in the MemoryAccess table should be in the same
3208 // congruence class.
3209
3210 // Filter out the unreachable and trivially dead entries, because they may
3211 // never have been updated if the instructions were not processed.
3212 auto ReachableAccessPred =
3213 [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
3214 bool Result = ReachableBlocks.count(Ptr: Pair.first->getBlock());
3215 if (!Result || MSSA->isLiveOnEntryDef(MA: Pair.first) ||
3216 MemoryToDFSNum(MA: Pair.first) == 0)
3217 return false;
3218 if (auto *MemDef = dyn_cast<MemoryDef>(Val: Pair.first))
3219 return !isInstructionTriviallyDead(I: MemDef->getMemoryInst());
3220
3221 // We could have phi nodes whose operands are all trivially dead,
3222 // so we don't process them.
3223 if (auto *MemPHI = dyn_cast<MemoryPhi>(Val: Pair.first)) {
3224 for (const auto &U : MemPHI->incoming_values()) {
3225 if (auto *I = dyn_cast<Instruction>(Val: &*U)) {
3226 if (!isInstructionTriviallyDead(I))
3227 return true;
3228 }
3229 }
3230 return false;
3231 }
3232
3233 return true;
3234 };
3235
3236 auto Filtered = make_filter_range(Range: MemoryAccessToClass, Pred: ReachableAccessPred);
3237 for (auto KV : Filtered) {
3238 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(Val: KV.first)) {
3239 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(Val: KV.second->getMemoryLeader());
3240 if (FirstMUD && SecondMUD) {
3241 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
3242 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
3243 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
3244 ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
3245 "The instructions for these memory operations should have "
3246 "been in the same congruence class or reachable through "
3247 "a single argument phi");
3248 }
3249 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(Val: KV.first)) {
3250 // We can only sanely verify that MemoryDefs in the operand list all have
3251 // the same class.
3252 auto ReachableOperandPred = [&](const Use &U) {
3253 return ReachableEdges.count(
3254 V: {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
3255 isa<MemoryDef>(Val: U);
3256
3257 };
3258 // All arguments should be in the same class, ignoring unreachable arguments.
3259 auto FilteredPhiArgs =
3260 make_filter_range(Range: FirstMP->operands(), Pred: ReachableOperandPred);
3261 SmallVector<const CongruenceClass *, 16> PhiOpClasses;
3262 std::transform(first: FilteredPhiArgs.begin(), last: FilteredPhiArgs.end(),
3263 result: std::back_inserter(x&: PhiOpClasses), unary_op: [&](const Use &U) {
3264 const MemoryDef *MD = cast<MemoryDef>(Val: U);
3265 return ValueToClass.lookup(Val: MD->getMemoryInst());
3266 });
3267 assert(all_equal(PhiOpClasses) &&
3268 "All MemoryPhi arguments should be in the same class");
3269 }
3270 }
3271#endif
3272}
3273
3274 // Verify that the sparse propagation we did actually found the maximal fixpoint.
3275// We do this by storing the value to class mapping, touching all instructions,
3276// and redoing the iteration to see if anything changed.
3277void NewGVN::verifyIterationSettled(Function &F) {
3278#ifndef NDEBUG
3279 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3280 if (DebugCounter::isCounterSet(ID: VNCounter))
3281 DebugCounter::setCounterValue(ID: VNCounter, Count: StartingVNCounter);
3282
3283 // Note that we have to store the actual classes, as we may change existing
3284 // classes during iteration. This is because our memory iteration propagation
3285 // is not perfect, and so may waste a little work. But it should generate
3286 // exactly the same congruence classes we have now, with different IDs.
3287 std::map<const Value *, CongruenceClass> BeforeIteration;
3288
3289 for (auto &KV : ValueToClass) {
3290 if (auto *I = dyn_cast<Instruction>(Val: KV.first))
3291 // Skip unused/dead instructions.
3292 if (InstrToDFSNum(V: I) == 0)
3293 continue;
3294 BeforeIteration.insert(x: {KV.first, *KV.second});
3295 }
3296
3297 TouchedInstructions.set();
3298 TouchedInstructions.reset(Idx: 0);
3299 OpSafeForPHIOfOps.clear();
3300 iterateTouchedInstructions();
3301 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>>
3302 EqualClasses;
3303 for (const auto &KV : ValueToClass) {
3304 if (auto *I = dyn_cast<Instruction>(Val: KV.first))
3305 // Skip unused/dead instructions.
3306 if (InstrToDFSNum(V: I) == 0)
3307 continue;
3308 // We could sink these uses, but I think this adds a bit of clarity here as
3309 // to what we are comparing.
3310 auto *BeforeCC = &BeforeIteration.find(x: KV.first)->second;
3311 auto *AfterCC = KV.second;
3312 // Note that the classes can't change at this point, so we memoize the set
3313 // that are equal.
3314 if (!EqualClasses.count(V: {BeforeCC, AfterCC})) {
3315 assert(BeforeCC->isEquivalentTo(AfterCC) &&
3316 "Value number changed after main loop completed!");
3317 EqualClasses.insert(V: {BeforeCC, AfterCC});
3318 }
3319 }
3320#endif
3321}
3322
3323// Verify that for each store expression in the expression to class mapping,
3324 // only the latest version appears, rather than multiple stale ones.
3325// Because loads do not use the stored value when doing equality with stores,
3326// if we don't erase the old store expressions from the table, a load can find
3327// a no-longer valid StoreExpression.
3328void NewGVN::verifyStoreExpressions() const {
3329#ifndef NDEBUG
3330 // This is the only use of this, and it's not worth defining a complicated
3331 // densemapinfo hash/equality function for it.
3332 std::set<
3333 std::pair<const Value *,
3334 std::tuple<const Value *, const CongruenceClass *, Value *>>>
3335 StoreExpressionSet;
3336 for (const auto &KV : ExpressionToClass) {
3337 if (auto *SE = dyn_cast<StoreExpression>(Val: KV.first)) {
3338 // Make sure a version that will conflict with loads is not already there
3339 auto Res = StoreExpressionSet.insert(
3340 x: {SE->getOperand(N: 0), std::make_tuple(args: SE->getMemoryLeader(), args: KV.second,
3341 args: SE->getStoredValue())});
3342 bool Okay = Res.second;
3343 // It's okay to have the same expression already in there if it is
3344 // identical in nature.
3345 // This can happen when the leader of the stored value changes over time.
3346 if (!Okay)
3347 Okay = (std::get<1>(t: Res.first->second) == KV.second) &&
3348 (lookupOperandLeader(V: std::get<2>(t: Res.first->second)) ==
3349 lookupOperandLeader(V: SE->getStoredValue()));
3350 assert(Okay && "Stored expression conflict exists in expression table");
3351 auto *ValueExpr = ValueToExpression.lookup(Val: SE->getStoreInst());
3352 assert(ValueExpr && ValueExpr->equals(*SE) &&
3353 "StoreExpression in ExpressionToClass is not latest "
3354 "StoreExpression for value");
3355 }
3356 }
3357#endif
3358}
3359
3360// This is the main value numbering loop, it iterates over the initial touched
3361// instruction set, propagating value numbers, marking things touched, etc,
3362// until the set of touched instructions is completely empty.
3363void NewGVN::iterateTouchedInstructions() {
3364 uint64_t Iterations = 0;
3365 // Figure out where TouchedInstructions starts.
3366 int FirstInstr = TouchedInstructions.find_first();
3367 // Nothing set, nothing to iterate, just return.
3368 if (FirstInstr == -1)
3369 return;
3370 const BasicBlock *LastBlock = getBlockForValue(V: InstrFromDFSNum(DFSNum: FirstInstr));
3371 while (TouchedInstructions.any()) {
3372 ++Iterations;
3373 // Walk through all the instructions in all the blocks in RPO.
3374 // TODO: As we hit a new block, we should push and pop equalities into a
3375 // table lookupOperandLeader can use, to catch things PredicateInfo
3376 // might miss, like edge-only equivalences.
3377 for (unsigned InstrNum : TouchedInstructions.set_bits()) {
3378
3379 // This instruction was found to be dead. We don't bother looking
3380 // at it again.
3381 if (InstrNum == 0) {
3382 TouchedInstructions.reset(Idx: InstrNum);
3383 continue;
3384 }
3385
3386 Value *V = InstrFromDFSNum(DFSNum: InstrNum);
3387 const BasicBlock *CurrBlock = getBlockForValue(V);
3388
3389 // If we hit a new block, do reachability processing.
3390 if (CurrBlock != LastBlock) {
3391 LastBlock = CurrBlock;
3392 bool BlockReachable = ReachableBlocks.count(Ptr: CurrBlock);
3393 const auto &CurrInstRange = BlockInstRange.lookup(Val: CurrBlock);
3394
3395 // If it's not reachable, erase any touched instructions and move on.
3396 if (!BlockReachable) {
3397 TouchedInstructions.reset(I: CurrInstRange.first, E: CurrInstRange.second);
3398 LLVM_DEBUG(dbgs() << "Skipping instructions in block "
3399 << getBlockName(CurrBlock)
3400 << " because it is unreachable\n");
3401 continue;
3402 }
3403 updateProcessedCount(V: CurrBlock);
3404 }
3405 // Reset after processing (because we may mark ourselves as touched when
3406 // we propagate equalities).
3407 TouchedInstructions.reset(Idx: InstrNum);
3408
3409 if (auto *MP = dyn_cast<MemoryPhi>(Val: V)) {
3410 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
3411 valueNumberMemoryPhi(MP);
3412 } else if (auto *I = dyn_cast<Instruction>(Val: V)) {
3413 valueNumberInstruction(I);
3414 } else {
3415 llvm_unreachable("Should have been a MemoryPhi or Instruction");
3416 }
3417 updateProcessedCount(V);
3418 }
3419 }
3420 NumGVNMaxIterations = std::max(a: NumGVNMaxIterations.getValue(), b: Iterations);
3421}
3422
3423// This is the main transformation entry point.
3424bool NewGVN::runGVN() {
3425 if (DebugCounter::isCounterSet(ID: VNCounter))
3426 StartingVNCounter = DebugCounter::getCounterValue(ID: VNCounter);
3427 bool Changed = false;
3428 NumFuncArgs = F.arg_size();
3429 MSSAWalker = MSSA->getWalker();
3430 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression();
3431
3432 // Count number of instructions for sizing of hash tables, and come
3433 // up with a global dfs numbering for instructions.
3434 unsigned ICount = 1;
3435 // Add an empty instruction to account for the fact that we start at 1
3436 DFSToInstr.emplace_back(Args: nullptr);
3437 // Note: We want ideal RPO traversal of the blocks, which is not quite the
3438 // same as dominator tree order, particularly with regard to whether backedges
3439 // get visited first or second, given a block with multiple successors.
3440 // If we visit in the wrong order, we will end up performing N times as many
3441 // iterations.
3442 // The dominator tree does guarantee that, for a given dom tree node, its
3443 // parent must occur before it in the RPO ordering. Thus, we only need to sort
3444 // the siblings.
3445 ReversePostOrderTraversal<Function *> RPOT(&F);
3446 unsigned Counter = 0;
3447 for (auto &B : RPOT) {
3448 auto *Node = DT->getNode(BB: B);
3449 assert(Node && "RPO and Dominator tree should have same reachability");
3450 RPOOrdering[Node] = ++Counter;
3451 }
3452 // Sort dominator tree children arrays into RPO.
3453 for (auto &B : RPOT) {
3454 auto *Node = DT->getNode(BB: B);
3455 if (Node->getNumChildren() > 1)
3456 llvm::sort(C&: *Node, Comp: [&](const DomTreeNode *A, const DomTreeNode *B) {
3457 return RPOOrdering[A] < RPOOrdering[B];
3458 });
3459 }
3460
3461 // Now a standard depth first ordering of the domtree is equivalent to RPO.
3462 for (auto *DTN : depth_first(G: DT->getRootNode())) {
3463 BasicBlock *B = DTN->getBlock();
3464 const auto &BlockRange = assignDFSNumbers(B, Start: ICount);
3465 BlockInstRange.insert(KV: {B, BlockRange});
3466 ICount += BlockRange.second - BlockRange.first;
3467 }
3468 initializeCongruenceClasses(F);
3469
3470 TouchedInstructions.resize(N: ICount);
3471 // Ensure we don't end up resizing the expressionToClass map, as
3472 // that can be quite expensive. At most, we have one expression per
3473 // instruction.
3474 ExpressionToClass.reserve(NumEntries: ICount);
3475
3476 // Initialize the touched instructions to include the entry block.
3477 const auto &InstRange = BlockInstRange.lookup(Val: &F.getEntryBlock());
3478 TouchedInstructions.set(I: InstRange.first, E: InstRange.second);
3479 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
3480 << " marked reachable\n");
3481 ReachableBlocks.insert(Ptr: &F.getEntryBlock());
3482
3483 iterateTouchedInstructions();
3484 verifyMemoryCongruency();
3485 verifyIterationSettled(F);
3486 verifyStoreExpressions();
3487
3488 Changed |= eliminateInstructions(F);
3489
3490 // Delete all instructions marked for deletion.
3491 for (Instruction *ToErase : InstructionsToErase) {
3492 if (!ToErase->use_empty())
3493 ToErase->replaceAllUsesWith(V: PoisonValue::get(T: ToErase->getType()));
3494
3495 assert(ToErase->getParent() &&
3496 "BB containing ToErase deleted unexpectedly!");
3497 ToErase->eraseFromParent();
3498 }
3499 Changed |= !InstructionsToErase.empty();
3500
3501 // Delete all unreachable blocks.
3502 auto UnreachableBlockPred = [&](const BasicBlock &BB) {
3503 return !ReachableBlocks.count(Ptr: &BB);
3504 };
3505
3506 for (auto &BB : make_filter_range(Range&: F, Pred: UnreachableBlockPred)) {
3507 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
3508 << " is unreachable\n");
3509 deleteInstructionsInBlock(&BB);
3510 Changed = true;
3511 }
3512
3513 cleanupTables();
3514 return Changed;
3515}
3516
3517struct NewGVN::ValueDFS {
3518 int DFSIn = 0;
3519 int DFSOut = 0;
3520 int LocalNum = 0;
3521
3522 // Only one of Def and U will be set.
3523 // The bool in the Def tells us whether the Def is the stored value of a
3524 // store.
3525 PointerIntPair<Value *, 1, bool> Def;
3526 Use *U = nullptr;
3527
3528 bool operator<(const ValueDFS &Other) const {
3529 // It's not enough that any given field be less than - we have sets
3530 // of fields that need to be evaluated together to give a proper ordering.
3531 // For example, if you have:
3532 // DFS (1, 3)
3533 // Val 0
3534 // DFS (1, 2)
3535 // Val 50
3536 // We want the second to be less than the first, but if we just go field
3537 // by field, we will get to Val 0 < Val 50 and say the first is less than
3538 // the second. We only want it to be less than if the DFS orders are equal.
3539 //
3540 // Each LLVM instruction only produces one value, and thus the lowest-level
3541 // differentiator that really matters for the stack (and what we use as a
3542 // replacement) is the local dfs number.
3543 // Everything else in the structure is instruction level, and only affects
3544 // the order in which we will replace operands of a given instruction.
3545 //
3546 // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3547 // the order of replacement of uses does not matter.
3548 // IE given,
3549 // a = 5
3550 // b = a + a
3551 // When you hit b, you will have two valuedfs with the same dfsin, out, and
3552 // localnum.
3553 // The .val will be the same as well.
3554 // The .u's will be different.
3555 // You will replace both, and it does not matter what order you replace them
3556 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3557 // operand 2).
3558 // Similarly for the case of same dfsin, dfsout, localnum, but different
3559 // .val's
3560 // a = 5
3561 // b = 6
3562 // c = a + b
3563 // in c, we will have a valuedfs for a, and one for b, with everything the same
3564 // but .val and .u.
3565 // It does not matter what order we replace these operands in.
3566 // You will always end up with the same IR, and this is guaranteed.
3567 return std::tie(args: DFSIn, args: DFSOut, args: LocalNum, args: Def, args: U) <
3568 std::tie(args: Other.DFSIn, args: Other.DFSOut, args: Other.LocalNum, args: Other.Def,
3569 args: Other.U);
3570 }
3571};
3572
3573 // This function converts the set of members for a congruence class from
3574 // values to sets of defs and uses with associated DFS info. The total
3575 // number of reachable uses for each value is stored in UseCounts, and
3576 // instructions that seem dead (have no non-dead uses) are stored in
3577 // ProbablyDead.
3578void NewGVN::convertClassToDFSOrdered(
3579 const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3580 DenseMap<const Value *, unsigned int> &UseCounts,
3581 SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3582 for (auto *D : Dense) {
3583 // First add the value.
3584 BasicBlock *BB = getBlockForValue(V: D);
3585 // Constants are handled prior to ever calling this function, so
3586 // we should only be left with instructions as members.
3587 assert(BB && "Should have figured out a basic block for value");
3588 ValueDFS VDDef;
3589 DomTreeNode *DomNode = DT->getNode(BB);
3590 VDDef.DFSIn = DomNode->getDFSNumIn();
3591 VDDef.DFSOut = DomNode->getDFSNumOut();
3592 // If it's a store, use the leader of the value operand, if it's always
3593 // available, or the value operand. TODO: We could do dominance checks to
3594 // find a dominating leader, but not worth it ATM.
3595 if (auto *SI = dyn_cast<StoreInst>(Val: D)) {
3596 auto Leader = lookupOperandLeader(V: SI->getValueOperand());
3597 if (alwaysAvailable(V: Leader)) {
3598 VDDef.Def.setPointer(Leader);
3599 } else {
3600 VDDef.Def.setPointer(SI->getValueOperand());
3601 VDDef.Def.setInt(true);
3602 }
3603 } else {
3604 VDDef.Def.setPointer(D);
3605 }
3606 assert(isa<Instruction>(D) &&
3607 "The dense set member should always be an instruction");
3608 Instruction *Def = cast<Instruction>(Val: D);
3609 VDDef.LocalNum = InstrToDFSNum(V: D);
3610 DFSOrderedSet.push_back(Elt: VDDef);
3611 // If there is a phi node equivalent, add it
3612 if (auto *PN = RealToTemp.lookup(Val: Def)) {
3613 auto *PHIE =
3614 dyn_cast_or_null<PHIExpression>(Val: ValueToExpression.lookup(Val: Def));
3615 if (PHIE) {
3616 VDDef.Def.setInt(false);
3617 VDDef.Def.setPointer(PN);
3618 VDDef.LocalNum = 0;
3619 DFSOrderedSet.push_back(Elt: VDDef);
3620 }
3621 }
3622
3623 unsigned int UseCount = 0;
3624 // Now add the uses.
3625 for (auto &U : Def->uses()) {
3626 if (auto *I = dyn_cast<Instruction>(Val: U.getUser())) {
3627 // Don't try to replace into dead uses
3628 if (InstructionsToErase.count(Ptr: I))
3629 continue;
3630 ValueDFS VDUse;
3631 // Put the phi node uses in the incoming block.
3632 BasicBlock *IBlock;
3633 if (auto *P = dyn_cast<PHINode>(Val: I)) {
3634 IBlock = P->getIncomingBlock(U);
3635 // Make phi node users appear last in the incoming block
3636 // they are from.
3637 VDUse.LocalNum = InstrDFS.size() + 1;
3638 } else {
3639 IBlock = getBlockForValue(V: I);
3640 VDUse.LocalNum = InstrToDFSNum(V: I);
3641 }
3642
3643 // Skip uses in unreachable blocks, as we're going
3644 // to delete them.
3645 if (!ReachableBlocks.contains(Ptr: IBlock))
3646 continue;
3647
3648 DomTreeNode *DomNode = DT->getNode(BB: IBlock);
3649 VDUse.DFSIn = DomNode->getDFSNumIn();
3650 VDUse.DFSOut = DomNode->getDFSNumOut();
3651 VDUse.U = &U;
3652 ++UseCount;
3653 DFSOrderedSet.emplace_back(Args&: VDUse);
3654 }
3655 }
3656
3657 // If there are no uses, it's probably dead (but it may have side effects,
3658 // so it's not definitely dead). Otherwise, store the number of uses so we
3659 // can track whether it becomes dead later.
3660 if (UseCount == 0)
3661 ProbablyDead.insert(Ptr: Def);
3662 else
3663 UseCounts[Def] = UseCount;
3664 }
3665}
3666
3667 // This function converts the set of members for a congruence class from
3668 // values to the set of defs for loads and stores, with associated DFS info.
3669void NewGVN::convertClassToLoadsAndStores(
3670 const CongruenceClass &Dense,
3671 SmallVectorImpl<ValueDFS> &LoadsAndStores) const {
3672 for (auto *D : Dense) {
3673 if (!isa<LoadInst>(Val: D) && !isa<StoreInst>(Val: D))
3674 continue;
3675
3676 BasicBlock *BB = getBlockForValue(V: D);
3677 ValueDFS VD;
3678 DomTreeNode *DomNode = DT->getNode(BB);
3679 VD.DFSIn = DomNode->getDFSNumIn();
3680 VD.DFSOut = DomNode->getDFSNumOut();
3681 VD.Def.setPointer(D);
3682
3683 // If it's an instruction, use the real local dfs number.
3684 if (auto *I = dyn_cast<Instruction>(Val: D))
3685 VD.LocalNum = InstrToDFSNum(V: I);
3686 else
3687 llvm_unreachable("Should have been an instruction");
3688
3689 LoadsAndStores.emplace_back(Args&: VD);
3690 }
3691}
3692
3693static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
3694 patchReplacementInstruction(I, Repl);
3695 I->replaceAllUsesWith(V: Repl);
3696}
3697
3698void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
3699 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
3700 ++NumGVNBlocksDeleted;
3701
3702 // Delete the instructions backwards, as this reduces the likelihood of having
3703 // to update as many def-use and use-def chains. Start after the terminator.
3704 auto StartPoint = BB->rbegin();
3705 ++StartPoint;
3706 // Note that we explicitly recalculate BB->rend() on each iteration,
3707 // as it may change when we remove the first instruction.
3708 for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) {
3709 Instruction &Inst = *I++;
3710 if (!Inst.use_empty())
3711 Inst.replaceAllUsesWith(V: PoisonValue::get(T: Inst.getType()));
3712 if (isa<LandingPadInst>(Val: Inst))
3713 continue;
3714 salvageKnowledge(I: &Inst, AC);
3715
3716 Inst.eraseFromParent();
3717 ++NumGVNInstrDeleted;
3718 }
3719 // Now insert something that simplifycfg will turn into an unreachable.
3720 Type *Int8Ty = Type::getInt8Ty(C&: BB->getContext());
3721 new StoreInst(
3722 PoisonValue::get(T: Int8Ty),
3723 Constant::getNullValue(Ty: PointerType::getUnqual(C&: BB->getContext())),
3724 BB->getTerminator()->getIterator());
3725}
3726
3727void NewGVN::markInstructionForDeletion(Instruction *I) {
3728 LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
3729 InstructionsToErase.insert(Ptr: I);
3730}
3731
3732void NewGVN::replaceInstruction(Instruction *I, Value *V) {
3733 LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
3734 patchAndReplaceAllUsesWith(I, Repl: V);
3735 // We save the actual erasing to avoid invalidating memory
3736 // dependencies until we are done with everything.
3737 markInstructionForDeletion(I);
3738}
3739
3740namespace {
3741
3742// This is a stack that contains both the value and dfs info of where
3743// that value is valid.
3744class ValueDFSStack {
3745public:
3746 Value *back() const { return ValueStack.back(); }
3747 std::pair<int, int> dfs_back() const { return DFSStack.back(); }
3748
3749 void push_back(Value *V, int DFSIn, int DFSOut) {
3750 ValueStack.emplace_back(Args&: V);
3751 DFSStack.emplace_back(Args&: DFSIn, Args&: DFSOut);
3752 }
3753
3754 bool empty() const { return DFSStack.empty(); }
3755
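// Return true if the value on top of the stack is still valid at the position
// described by (DFSIn, DFSOut). With dominator tree DFS numbers, node A
// dominates node B exactly when A.in <= B.in and B.out <= A.out; e.g. an
// entry pushed at (2, 9) is in scope at (4, 5) but not at (10, 11).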
3756 bool isInScope(int DFSIn, int DFSOut) const {
3757 if (empty())
3758 return false;
3759 return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second;
3760 }
3761
3762 void popUntilDFSScope(int DFSIn, int DFSOut) {
3763
3764 // These two should always be in sync at this point.
3765 assert(ValueStack.size() == DFSStack.size() &&
3766 "Mismatch between ValueStack and DFSStack");
3767 while (
3768 !DFSStack.empty() &&
3769 !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) {
3770 DFSStack.pop_back();
3771 ValueStack.pop_back();
3772 }
3773 }
3774
3775private:
3776 SmallVector<Value *, 8> ValueStack;
3777 SmallVector<std::pair<int, int>, 8> DFSStack;
3778};
3779
3780} // end anonymous namespace
3781
3782// Given an expression, get the congruence class for it.
3783CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const {
3784 if (auto *VE = dyn_cast<VariableExpression>(Val: E))
3785 return ValueToClass.lookup(Val: VE->getVariableValue());
3786 else if (isa<DeadExpression>(Val: E))
3787 return TOPClass;
3788 return ExpressionToClass.lookup(Val: E);
3789}
3790
3791 // Given an expression and a basic block we are trying to see if it is
3792 // available in, see if the expression has a leader available in that block.
3793Value *NewGVN::findPHIOfOpsLeader(const Expression *E,
3794 const Instruction *OrigInst,
3795 const BasicBlock *BB) const {
3796 // It would already be constant if we could make it constant
3797 if (auto *CE = dyn_cast<ConstantExpression>(Val: E))
3798 return CE->getConstantValue();
3799 if (auto *VE = dyn_cast<VariableExpression>(Val: E)) {
3800 auto *V = VE->getVariableValue();
3801 if (alwaysAvailable(V) || DT->dominates(A: getBlockForValue(V), B: BB))
3802 return VE->getVariableValue();
3803 }
3804
3805 auto *CC = getClassForExpression(E);
3806 if (!CC)
3807 return nullptr;
3808 if (alwaysAvailable(V: CC->getLeader()))
3809 return CC->getLeader();
3810
3811 for (auto *Member : *CC) {
3812 auto *MemberInst = dyn_cast<Instruction>(Val: Member);
3813 if (MemberInst == OrigInst)
3814 continue;
3815 // Anything that isn't an instruction is always available.
3816 if (!MemberInst)
3817 return Member;
3818 if (DT->dominates(A: getBlockForValue(V: MemberInst), B: BB))
3819 return Member;
3820 }
3821 return nullptr;
3822}
3823
3824bool NewGVN::eliminateInstructions(Function &F) {
3825 // This is a non-standard eliminator. The normal way to eliminate is
3826 // to walk the dominator tree in order, keeping track of available
3827 // values, and eliminating them. However, this is mildly
3828 // pointless. It requires doing lookups on every instruction,
3829 // regardless of whether we will ever eliminate it. For
3830 // instructions part of most singleton congruence classes, we know we
3831 // will never eliminate them.
3832
3833 // Instead, this eliminator looks at the congruence classes directly, sorts
3834 // them into a DFS ordering of the dominator tree, and then we just
3835 // perform elimination straight on the sets by walking the congruence
3836 // class member uses in order, and eliminate the ones dominated by the
3837 // last member. This is worst case O(E log E) where E = number of
3838 // instructions in a single congruence class. In theory, this is all
3839 // instructions. In practice, it is much faster, as most instructions are
3840 // either in singleton congruence classes or can't possibly be eliminated
3841 // anyway (if there are no overlapping DFS ranges in class).
3842 // When we find something not dominated, it becomes the new leader
3843 // for elimination purposes.
3844 // TODO: If we wanted to be faster, We could remove any members with no
3845 // overlapping ranges while sorting, as we will never eliminate anything
3846 // with those members, as they don't dominate anything else in our set.
3847
3848 bool AnythingReplaced = false;
3849
3850 // Since we are going to walk the domtree anyway, and we can't guarantee the
3851 // DFS numbers are updated, we compute some ourselves.
3852 DT->updateDFSNumbers();
3853
3854 // Go through all of our phi nodes, and kill the arguments associated with
3855 // unreachable edges.
3856 auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) {
3857 for (auto &Operand : PHI->incoming_values())
3858 if (!ReachableEdges.count(V: {PHI->getIncomingBlock(U: Operand), BB})) {
3859 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
3860 << " for block "
3861 << getBlockName(PHI->getIncomingBlock(Operand))
3862 << " with poison due to it being unreachable\n");
3863 Operand.set(PoisonValue::get(T: PHI->getType()));
3864 }
3865 };
3866 // Replace unreachable phi arguments.
3867 // At this point, RevisitOnReachabilityChange only contains:
3868 //
3869 // 1. PHIs
3870 // 2. Temporaries that will convert to PHIs
3871 // 3. Operations that are affected by an unreachable edge but do not fit into
3872 // 1 or 2 (rare).
3873 // So it is a slight overshoot of what we want. We could make it exact by
3874 // using two SparseBitVectors per block.
3875 DenseMap<const BasicBlock *, unsigned> ReachablePredCount;
3876 for (auto &KV : ReachableEdges)
3877 ReachablePredCount[KV.getEnd()]++;
3878 for (auto &BBPair : RevisitOnReachabilityChange) {
3879 for (auto InstNum : BBPair.second) {
3880 auto *Inst = InstrFromDFSNum(DFSNum: InstNum);
3881 auto *PHI = dyn_cast<PHINode>(Val: Inst);
3882 PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(Val: RealToTemp.lookup(Val: Inst));
3883 if (!PHI)
3884 continue;
3885 auto *BB = BBPair.first;
3886 if (ReachablePredCount.lookup(Val: BB) != PHI->getNumIncomingValues())
3887 ReplaceUnreachablePHIArgs(PHI, BB);
3888 }
3889 }
3890
3891 // Map to store the use counts
3892 DenseMap<const Value *, unsigned int> UseCounts;
3893 for (auto *CC : reverse(C&: CongruenceClasses)) {
3894 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID()
3895 << "\n");
3896 // Track the equivalent store info so we can decide whether to try
3897 // dead store elimination.
3898 SmallVector<ValueDFS, 8> PossibleDeadStores;
3899 SmallPtrSet<Instruction *, 8> ProbablyDead;
3900 if (CC->isDead() || CC->empty())
3901 continue;
3902 // Everything still in the TOP class is unreachable or dead.
3903 if (CC == TOPClass) {
3904 for (auto *M : *CC) {
3905 auto *VTE = ValueToExpression.lookup(Val: M);
3906 if (VTE && isa<DeadExpression>(Val: VTE))
3907 markInstructionForDeletion(I: cast<Instruction>(Val: M));
3908 assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) ||
3909 InstructionsToErase.count(cast<Instruction>(M))) &&
3910 "Everything in TOP should be unreachable or dead at this "
3911 "point");
3912 }
3913 continue;
3914 }
3915
3916 assert(CC->getLeader() && "We should have had a leader");
3917 // If this is a leader that is always available, and it's a
3918 // constant or has no equivalences, just replace everything with
3919 // it. We then update the congruence class with whatever members
3920 // are left.
3921 Value *Leader =
3922 CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
3923 if (alwaysAvailable(V: Leader)) {
3924 CongruenceClass::MemberSet MembersLeft;
3925 for (auto *M : *CC) {
3926 Value *Member = M;
3927 // Void things have no uses we can replace.
3928 if (Member == Leader || !isa<Instruction>(Val: Member) ||
3929 Member->getType()->isVoidTy()) {
3930 MembersLeft.insert(Ptr: Member);
3931 continue;
3932 }
3933 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for "
3934 << *Member << "\n");
3935 auto *I = cast<Instruction>(Val: Member);
3936 assert(Leader != I && "About to accidentally remove our leader");
3937 replaceInstruction(I, V: Leader);
3938 AnythingReplaced = true;
3939 }
3940 CC->swap(Other&: MembersLeft);
3941 } else {
3942 // If this is a singleton, we can skip it.
3943 if (CC->size() != 1 || RealToTemp.count(Val: Leader)) {
3944 // This is a stack because equality replacement/etc may place
3945 // constants in the middle of the member list, and we want to use
3946 // those constant values in preference to the current leader, over
3947 // the scope of those constants.
3948 ValueDFSStack EliminationStack;
3949
3950 // Convert the members to DFS ordered sets and then merge them.
3951 SmallVector<ValueDFS, 8> DFSOrderedSet;
3952 convertClassToDFSOrdered(Dense: *CC, DFSOrderedSet, UseCounts, ProbablyDead);
3953
3954 // Sort the whole thing.
3955 llvm::sort(C&: DFSOrderedSet);
3956 for (auto &VD : DFSOrderedSet) {
3957 int MemberDFSIn = VD.DFSIn;
3958 int MemberDFSOut = VD.DFSOut;
3959 Value *Def = VD.Def.getPointer();
3960 bool FromStore = VD.Def.getInt();
3961 Use *U = VD.U;
3962 // We ignore void things because we can't get a value from them.
3963 if (Def && Def->getType()->isVoidTy())
3964 continue;
3965 auto *DefInst = dyn_cast_or_null<Instruction>(Val: Def);
3966 if (DefInst && AllTempInstructions.count(V: DefInst)) {
3967 auto *PN = cast<PHINode>(Val: DefInst);
3968
3969 // If this is a value phi and that's the expression we used, insert
3970 // it into the program and remove it from the temporary instruction
3971 // list.
3972 AllTempInstructions.erase(V: PN);
3973 auto *DefBlock = getBlockForValue(V: Def);
3974 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops " << *Def
3975 << " into block "
3976 << getBlockName(getBlockForValue(Def)) << "\n");
3977 PN->insertBefore(InsertPos: &DefBlock->front());
3978 Def = PN;
3979 NumGVNPHIOfOpsEliminations++;
3980 }
3981
3982 if (EliminationStack.empty()) {
3983 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
3984 } else {
3985 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
3986 << EliminationStack.dfs_back().first << ","
3987 << EliminationStack.dfs_back().second << ")\n");
3988 }
3989
3990 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
3991 << MemberDFSOut << ")\n");
3992 // First, we see if we are out of scope or empty. If so,
3993 // and there are equivalences, we try to replace the top of
3994 // the stack with equivalences (if it's on the stack, it must
3995 // not have been eliminated yet).
3996 // Then we synchronize to our current scope, by
3997 // popping until we are back within a DFS scope that
3998 // dominates the current member.
3999 // Then, what happens depends on a few factors:
4000 // If the stack is now empty, we need to push.
4001 // If we have a constant or a local equivalence we want to
4002 // start using, we also push.
4003 // Otherwise, we walk along, processing members who are
4004 // dominated by this scope, and eliminate them.
4005 bool ShouldPush = Def && EliminationStack.empty();
4006 bool OutOfScope =
4007 !EliminationStack.isInScope(DFSIn: MemberDFSIn, DFSOut: MemberDFSOut);
4008
4009 if (OutOfScope || ShouldPush) {
4010 // Sync to our current scope.
4011 EliminationStack.popUntilDFSScope(DFSIn: MemberDFSIn, DFSOut: MemberDFSOut);
4012 bool ShouldPush = Def && EliminationStack.empty();
4013 if (ShouldPush) {
4014 EliminationStack.push_back(V: Def, DFSIn: MemberDFSIn, DFSOut: MemberDFSOut);
4015 }
4016 }
4017
4018 // Skip the Defs; we only want to eliminate on their uses. But mark
4019 // dominated defs as dead.
          if (Def) {
            // For anything in this case, what and how we value number
            // guarantees that any side-effects that would have occurred (e.g.
            // throwing) can be proven to either still occur (because the value
            // is dominated by something that has the same side-effects), or
            // never occur. Otherwise, we would not have been able to prove it
            // value equivalent to something else. For these things, we can
            // just mark it all dead. Note that this is different from the
            // "ProbablyDead" set, whose members may not be dominated by
            // anything, and thus are only easy to prove dead if they are also
            // side-effect free. Note that because stores are put in terms of
            // the stored value, we skip stored values here. If the stored
            // value is really dead, it will still be marked for deletion when
            // we process it in its own class.
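            // For instance (an illustration, not a case handled specially
            // below): if two identical divisions end up in one class and one
            // dominates the other, the dominated one can be deleted, since any
            // trap it could raise has already been raised by the dominating
            // division on every path that reaches it.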
            auto *DefI = dyn_cast<Instruction>(Def);
            if (!EliminationStack.empty() && DefI && !FromStore) {
              Value *DominatingLeader = EliminationStack.back();
              if (DominatingLeader != Def) {
                // Even if the instruction is removed, we still need to update
                // flags/metadata because of downstream users of the leader.
                if (!match(DefI, m_Intrinsic<Intrinsic::ssa_copy>()))
                  patchReplacementInstruction(DefI, DominatingLeader);

                markInstructionForDeletion(DefI);
              }
            }
            continue;
          }
          // At this point, we know it is a Use we are trying to possibly
          // replace.

          assert(isa<Instruction>(U->get()) &&
                 "Current def should have been an instruction");
          assert(isa<Instruction>(U->getUser()) &&
                 "Current user should have been an instruction");

          // If the thing we are replacing into is already marked to be dead,
          // this use is dead. Note that this is true regardless of whether
          // we have anything dominating the use or not. We do this here
          // because we are already walking all the uses anyway.
          Instruction *InstUse = cast<Instruction>(U->getUser());
          if (InstructionsToErase.count(InstUse)) {
            auto &UseCount = UseCounts[U->get()];
            if (--UseCount == 0) {
              ProbablyDead.insert(cast<Instruction>(U->get()));
            }
          }

          // If we get to this point and the stack is empty, we have a use with
          // nothing available to eliminate it, so just skip it.
          if (EliminationStack.empty())
            continue;
          Value *DominatingLeader = EliminationStack.back();

          auto *II = dyn_cast<IntrinsicInst>(DominatingLeader);
          bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy;
          if (isSSACopy)
            DominatingLeader = II->getOperand(0);

          // Don't replace our existing users with ourselves.
          if (U->get() == DominatingLeader)
            continue;
          LLVM_DEBUG(dbgs()
                     << "Found replacement " << *DominatingLeader << " for "
                     << *U->get() << " in " << *(U->getUser()) << "\n");

          // If we replaced something in an instruction, handle the patching of
          // metadata. Skip this if we are replacing predicateinfo with its
          // original operand, as we already know we can just drop it.
          auto *ReplacedInst = cast<Instruction>(U->get());
          auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst);
          if (!PI || DominatingLeader != PI->OriginalOp)
            patchReplacementInstruction(ReplacedInst, DominatingLeader);
          U->set(DominatingLeader);
          // This is now a use of the dominating leader, which means if the
          // dominating leader was dead, it's now live!
          auto &LeaderUseCount = UseCounts[DominatingLeader];
          // It's about to be alive again.
          if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
            ProbablyDead.erase(cast<Instruction>(DominatingLeader));
          // For copy instructions, we use their operand as a leader,
          // which means we remove a user of the copy and it may become dead.
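          // E.g. (illustrative names) for PredicateInfo's
          // "%x.copy = llvm.ssa.copy(%x)": when %x itself is used as the
          // leader, the copy loses this user, so its use count is decremented
          // and it may now be trivially dead.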
          if (isSSACopy) {
            auto It = UseCounts.find(II);
            if (It != UseCounts.end()) {
              unsigned &IIUseCount = It->second;
              if (--IIUseCount == 0)
                ProbablyDead.insert(II);
            }
          }
          ++LeaderUseCount;
          AnythingReplaced = true;
        }
      }
    }

    // At this point, anything still in the ProbablyDead set is actually dead
    // if it would be trivially dead.
    for (auto *I : ProbablyDead)
      if (wouldInstructionBeTriviallyDead(I))
        markInstructionForDeletion(I);

    // Clean up the congruence class.
    CongruenceClass::MemberSet MembersLeft;
    for (auto *Member : *CC)
      if (!isa<Instruction>(Member) ||
          !InstructionsToErase.count(cast<Instruction>(Member)))
        MembersLeft.insert(Member);
    CC->swap(MembersLeft);

    // If we have possible dead stores to look at, try to eliminate them.
    if (CC->getStoreCount() > 0) {
      convertClassToLoadsAndStores(*CC, PossibleDeadStores);
      llvm::sort(PossibleDeadStores);
      ValueDFSStack EliminationStack;
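      // Informal sketch of the loop below: the stores in this class are
      // congruent, so walking them in DFS order with the same scoped stack as
      // above, a store dominated by another member of the class is redundant
      // (it would rewrite memory that provably already holds the same value)
      // and is marked dead.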
      for (auto &VD : PossibleDeadStores) {
        int MemberDFSIn = VD.DFSIn;
        int MemberDFSOut = VD.DFSOut;
        Instruction *Member = cast<Instruction>(VD.Def.getPointer());
        if (EliminationStack.empty() ||
            !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) {
          // Sync to our current scope.
          EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
          if (EliminationStack.empty()) {
            EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut);
            continue;
          }
        }
        // We already did load elimination, so nothing to do here.
        if (isa<LoadInst>(Member))
          continue;
        assert(!EliminationStack.empty());
        Instruction *Leader = cast<Instruction>(EliminationStack.back());
        (void)Leader;
        assert(DT->dominates(Leader->getParent(), Member->getParent()));
        // Member is dominated by Leader, and thus dead.
        LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
                          << " that is dominated by " << *Leader << "\n");
        markInstructionForDeletion(Member);
        CC->erase(Member);
        ++NumGVNDeadStores;
      }
    }
  }
  return AnythingReplaced;
}

// This function provides global ranking of operations so that we can place
// them in a canonical order. Note that rank alone is not necessarily enough
// for a complete ordering, as constants all have the same rank. However,
// generally, we will simplify an operation with all constants so that it
// doesn't matter what order they appear in.
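// Concretely, the code below assigns: plain constants rank 0, poison 1,
// undef 2, constant expressions 3, the Nth function argument 4 + N, and
// instructions 5 + NumFuncArgs + their DFS number (unnumbered or unreachable
// values get ~0).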
unsigned int NewGVN::getRank(const Value *V) const {
  // Prefer constants to undef to anything else.
  // Undef is a constant; we have to check it first.
  // Prefer poison to undef as it's less defined.
  // Prefer smaller constants to constantexprs.
  // Note that the order here matters because of class inheritance.
  if (isa<ConstantExpr>(V))
    return 3;
  if (isa<PoisonValue>(V))
    return 1;
  if (isa<UndefValue>(V))
    return 2;
  if (isa<Constant>(V))
    return 0;
  if (auto *A = dyn_cast<Argument>(V))
    return 4 + A->getArgNo();

  // Need to shift the instruction DFS by number of arguments + 5 to account
  // for the constant and argument ranking above.
  unsigned Result = InstrToDFSNum(V);
  if (Result > 0)
    return 5 + NumFuncArgs + Result;
  // Unreachable or something else, just return a really large number.
  return ~0;
}

// This function says whether the two operands of a commutative operation
// should have their order swapped when canonicalizing.
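// For example (illustrative): "add i32 7, %x" and "add i32 %x, 7" have their
// operands put into the same rank order before an expression is built, so both
// forms value number to the same canonical expression.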
bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const {
  // Because we only care about a total ordering, and don't rewrite expressions
  // in this order, we order by rank, which will give a strict weak ordering to
  // everything but constants, and then we order by pointer address.
  return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
}

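// Variant of shouldSwapOperands for intrinsic calls. Beyond the rank
// comparison, the body below caches, per intrinsic, which operand was last
// chosen as the swapped-to "predicate" so repeated queries for the same
// intrinsic stay consistent; seeing a different operand later invalidates the
// cached entry.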
bool NewGVN::shouldSwapOperandsForIntrinsic(const Value *A, const Value *B,
                                            const IntrinsicInst *I) const {
  auto LookupResult = IntrinsicInstPred.find(I);
  if (shouldSwapOperands(A, B)) {
    if (LookupResult == IntrinsicInstPred.end())
      IntrinsicInstPred.insert({I, B});
    else
      LookupResult->second = B;
    return true;
  }

  if (LookupResult != IntrinsicInstPred.end()) {
    auto *SeenPredicate = LookupResult->second;
    if (SeenPredicate) {
      if (SeenPredicate == B)
        return true;
      else
        LookupResult->second = nullptr;
    }
  }
  return false;
}

PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) {
  // Apparently the order in which we get these results matters for
  // the old GVN (see Chandler's comment in GVN.cpp). I'll keep
  // the same order here, just in case.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  bool Changed =
      NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout())
          .runGVN();
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}
