//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines some loop transformation utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/IVDescriptors.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

namespace llvm {

template <typename T> class DomTreeNodeBase;
using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class AAResults;
class AliasSet;
class AliasSetTracker;
class BasicBlock;
class BlockFrequencyInfo;
class ICFLoopSafetyInfo;
class IRBuilderBase;
class Loop;
class LoopInfo;
class MemoryAccess;
class MemorySSA;
class MemorySSAUpdater;
class OptimizationRemarkEmitter;
class PredIteratorCache;
class ScalarEvolution;
class ScalarEvolutionExpander;
class SCEV;
class SCEVExpander;
class TargetLibraryInfo;
class LPPassManager;
class Instruction;
struct RuntimeCheckingPtrGroup;
typedef std::pair<const RuntimeCheckingPtrGroup *,
                  const RuntimeCheckingPtrGroup *>
    RuntimePointerCheck;

template <typename T> class Optional;
template <typename T, unsigned N> class SmallSetVector;
template <typename T, unsigned N> class SmallVector;
template <typename T> class SmallVectorImpl;
template <typename T, unsigned N> class SmallPriorityWorklist;

/// Ensure that the loop \p L has a preheader, creating a new block for it if
/// necessary. Returns the preheader block, or nullptr if one could not be
/// inserted. The dominator tree, loop info and MemorySSA are updated if
/// provided, and LCSSA is preserved if requested.
BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
                                   MemorySSAUpdater *MSSAU, bool PreserveLCSSA);

/// Ensure that all exit blocks of the loop are dedicated exits.
///
/// For any loop exit block with non-loop predecessors, we split the loop
/// predecessors to use a dedicated loop exit block. We update the dominator
/// tree and loop info if provided, and will preserve LCSSA if requested.
bool formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI,
                             MemorySSAUpdater *MSSAU, bool PreserveLCSSA);
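
// Example (illustrative sketch, not part of the upstream API): a pass
// re-establishing loop-simplify-style prerequisites before transforming a
// loop, assuming DT, LI and an optional MemorySSAUpdater MSSAU are available.
//
//   if (!L->getLoopPreheader())
//     InsertPreheaderForLoop(L, DT, LI, MSSAU, /*PreserveLCSSA=*/true);
//   formDedicatedExitBlocks(L, DT, LI, MSSAU, /*PreserveLCSSA=*/true);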

/// Ensures LCSSA form for every instruction from the Worklist in the scope of
/// the innermost containing loop.
///
/// For each given instruction that has uses outside of the loop, an LCSSA PHI
/// node is inserted and the uses outside the loop are rewritten to use this
/// node.
///
/// LoopInfo and DominatorTree are required and, since the routine makes no
/// changes to the CFG, preserved.
///
/// Returns true if any modifications are made.
///
/// This function may introduce unused PHI nodes. If \p PHIsToRemove is not
/// nullptr, those are added to it (before removing, the caller has to check if
/// they still do not have any uses). Otherwise the PHIs are directly removed.
bool formLCSSAForInstructions(
    SmallVectorImpl<Instruction *> &Worklist, const DominatorTree &DT,
    const LoopInfo &LI, ScalarEvolution *SE, IRBuilderBase &Builder,
    SmallVectorImpl<PHINode *> *PHIsToRemove = nullptr);

/// Put loop into LCSSA form.
///
/// Looks at all instructions in the loop which have uses outside of the
/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
/// the loop are rewritten to use this node. Sub-loops must be in LCSSA form
/// already.
///
/// LoopInfo and DominatorTree are required and preserved.
///
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
bool formLCSSA(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
               ScalarEvolution *SE);

/// Put a loop nest into LCSSA form.
///
/// This recursively forms LCSSA for a loop nest.
///
/// LoopInfo and DominatorTree are required and preserved.
///
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
bool formLCSSARecursively(Loop &L, const DominatorTree &DT, const LoopInfo *LI,
                          ScalarEvolution *SE);
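
// Example (illustrative sketch): restoring LCSSA for a whole loop nest after a
// transformation, assuming DT, LI and (optionally) SE come from the caller's
// pass manager.
//
//   bool Changed = formLCSSARecursively(*L, DT, &LI, SE);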

/// Flags controlling how much is checked when sinking or hoisting
/// instructions. The number of memory accesses in the loop (and whether there
/// are too many) is determined in the constructors when using MemorySSA.
class SinkAndHoistLICMFlags {
public:
  // Explicitly set limits.
  SinkAndHoistLICMFlags(unsigned LicmMssaOptCap,
                        unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
                        Loop *L = nullptr, MemorySSA *MSSA = nullptr);
  // Use default limits.
  SinkAndHoistLICMFlags(bool IsSink, Loop *L = nullptr,
                        MemorySSA *MSSA = nullptr);

  void setIsSink(bool B) { IsSink = B; }
  bool getIsSink() { return IsSink; }
  bool tooManyMemoryAccesses() { return NoOfMemAccTooLarge; }
  bool tooManyClobberingCalls() { return LicmMssaOptCounter >= LicmMssaOptCap; }
  void incrementClobberingCalls() { ++LicmMssaOptCounter; }

protected:
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool IsSink;
};
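
// Example (illustrative sketch): constructing flags with the default limits
// for a hoisting run over loop L, letting MemorySSA determine whether the
// number of memory accesses is too large.
//
//   SinkAndHoistLICMFlags Flags(/*IsSink=*/false, L, MSSA);
//   bool SkipPromotion = Flags.tooManyMemoryAccesses();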

/// Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration. Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as
/// arguments. Diagnostics are emitted via \p ORE. It returns changed status.
bool sinkRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
                BlockFrequencyInfo *, TargetLibraryInfo *,
                TargetTransformInfo *, Loop *, AliasSetTracker *,
                MemorySSAUpdater *, ICFLoopSafetyInfo *,
                SinkAndHoistLICMFlags &, OptimizationRemarkEmitter *);

/// Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
/// first order w.r.t the DominatorTree. This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
/// Takes DomTreeNode, AAResults, LoopInfo, DominatorTree,
/// BlockFrequencyInfo, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// Diagnostics are emitted via \p ORE. It returns changed status.
bool hoistRegion(DomTreeNode *, AAResults *, LoopInfo *, DominatorTree *,
                 BlockFrequencyInfo *, TargetLibraryInfo *, Loop *,
                 AliasSetTracker *, MemorySSAUpdater *, ScalarEvolution *,
                 ICFLoopSafetyInfo *, SinkAndHoistLICMFlags &,
                 OptimizationRemarkEmitter *);

/// This function deletes dead loops. The caller of this function needs to
/// guarantee that the loop is in fact dead.
/// The function requires a number of prerequisites to be met:
/// - The loop needs to be in LCSSA form
/// - The loop needs to have a Preheader
/// - A unique dedicated exit block must exist
///
/// This also updates the relevant analysis information in \p DT, \p SE, \p LI
/// and \p MSSA if pointers to those are provided.
/// It also updates the loop PM if an updater struct is provided.
void deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE,
                    LoopInfo *LI, MemorySSA *MSSA = nullptr);

/// Remove the backedge of the specified loop. Handles loop nests and general
/// loop structures subject to the precondition that the loop has no parent
/// loop and has a single latch block. Preserves all listed analyses.
void breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
                       LoopInfo &LI, MemorySSA *MSSA);

/// Try to promote memory values to scalars by sinking stores out of
/// the loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant. It takes a set of must-alias values, Loop exit blocks
/// vector, loop exit blocks insertion point vector, PredIteratorCache,
/// LoopInfo, DominatorTree, Loop, AliasSet information for all instructions
/// of the loop and loop safety information as arguments.
/// Diagnostics are emitted via \p ORE. It returns changed status.
bool promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &, SmallVectorImpl<BasicBlock *> &,
    SmallVectorImpl<Instruction *> &, SmallVectorImpl<MemoryAccess *> &,
    PredIteratorCache &, LoopInfo *, DominatorTree *, const TargetLibraryInfo *,
    Loop *, AliasSetTracker *, MemorySSAUpdater *, ICFLoopSafetyInfo *,
    OptimizationRemarkEmitter *);

/// Does a BFS from a given node to all of its children inside a given loop.
/// The returned vector of nodes includes the starting point.
SmallVector<DomTreeNode *, 16> collectChildrenInLoop(DomTreeNode *N,
                                                     const Loop *CurLoop);

/// Returns the instructions that use values defined in the loop.
SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);

/// Find string metadata for a loop.
///
/// If the metadata has a value (e.g. {"llvm.distribute", 1}), return the value
/// as an operand, or nullptr if there is no operand. If the string metadata is
/// not found, return Optional's not-a-value.
Optional<const MDOperand *> findStringMetadataForLoop(const Loop *TheLoop,
                                                      StringRef Name);

/// Find named metadata for a loop with an integer value.
llvm::Optional<int> getOptionalIntLoopAttribute(const Loop *TheLoop,
                                                StringRef Name);

/// Find a combination of metadata ("llvm.loop.vectorize.width" and
/// "llvm.loop.vectorize.scalable.enable") for a loop and use it to construct an
/// ElementCount. If the metadata "llvm.loop.vectorize.width" cannot be found
/// then None is returned.
Optional<ElementCount>
getOptionalElementCountLoopAttribute(const Loop *TheLoop);
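
// Example (illustrative sketch): reading hints from a loop's metadata,
// assuming a Loop *L obtained from LoopInfo.
//
//   Optional<int> UnrollCount =
//       getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count");
//   Optional<ElementCount> VecWidth = getOptionalElementCountLoopAttribute(L);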

/// Create a new loop identifier for a loop created from a loop transformation.
///
/// @param OrigLoopID The loop ID of the loop before the transformation.
/// @param FollowupAttrs List of attribute names that contain attributes to be
///                      added to the new loop ID.
/// @param InheritOptionsAttrsPrefix Selects which attributes should be inherited
///        from the original loop. The following values are considered:
///        nullptr   : Inherit all attributes from @p OrigLoopID.
///        ""        : Do not inherit any attribute from @p OrigLoopID; only use
///                    those specified by a followup attribute.
///        "<prefix>": Inherit all attributes except those which start with
///                    <prefix>; commonly used to remove metadata for the
///                    applied transformation.
/// @param AlwaysNew If true, do not try to reuse OrigLoopID and never return
///        None.
///
/// @return The loop ID for the after-transformation loop. The following values
///         can be returned:
///         None         : No followup attribute was found; it is up to the
///                        transformation to choose attributes that make sense.
///         @p OrigLoopID: The original identifier can be reused.
///         nullptr      : The new loop has no attributes.
///         MDNode*      : A new unique loop identifier.
Optional<MDNode *>
makeFollowupLoopID(MDNode *OrigLoopID, ArrayRef<StringRef> FollowupAttrs,
                   const char *InheritOptionsAttrsPrefix = "",
                   bool AlwaysNew = false);
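
// Example (illustrative sketch): attaching followup metadata to an unrolled
// loop, assuming OrigLoopID was read from the original loop before the
// transformation.
//
//   Optional<MDNode *> NewLoopID = makeFollowupLoopID(
//       OrigLoopID, {"llvm.loop.unroll.followup_unrolled"});
//   if (NewLoopID.hasValue())
//     UnrolledLoop->setLoopID(NewLoopID.getValue());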

/// Look for the loop attribute that disables all transformation heuristics.
bool hasDisableAllTransformsHint(const Loop *L);

/// Look for the loop attribute that disables the LICM transformation heuristics.
bool hasDisableLICMTransformsHint(const Loop *L);

/// Look for the loop attribute that requires progress within the loop.
bool hasMustProgress(const Loop *L);

/// The mode sets how eagerly a transformation should be applied.
enum TransformationMode {
  /// The pass can use heuristics to determine whether a transformation should
  /// be applied.
  TM_Unspecified,

  /// The transformation should be applied without considering a cost model.
  TM_Enable,

  /// The transformation should not be applied.
  TM_Disable,

  /// Force is a flag and should not be used alone.
  TM_Force = 0x04,

  /// The transformation was directed by the user, e.g. by a #pragma in
  /// the source code. If the transformation could not be applied, a
  /// warning should be emitted.
  TM_ForcedByUser = TM_Enable | TM_Force,

  /// The transformation must not be applied. For instance, `#pragma clang loop
  /// unroll(disable)` explicitly forbids any unrolling to take place. Unlike
  /// general loop metadata, it must not be dropped. Most passes should not
  /// behave differently under TM_Disable and TM_SuppressedByUser.
  TM_SuppressedByUser = TM_Disable | TM_Force
};

/// @{
/// Get the mode for LLVM's supported loop transformations.
TransformationMode hasUnrollTransformation(const Loop *L);
TransformationMode hasUnrollAndJamTransformation(const Loop *L);
TransformationMode hasVectorizeTransformation(const Loop *L);
TransformationMode hasDistributeTransformation(const Loop *L);
TransformationMode hasLICMVersioningTransformation(const Loop *L);
/// @}
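
// Example (illustrative sketch): honoring user directives when deciding
// whether to unroll, assuming a Loop *L.
//
//   TransformationMode TM = hasUnrollTransformation(L);
//   if (TM & TM_Disable)
//     return false; // Unrolling is explicitly disabled for this loop.
//   bool IgnoreCostModel = (TM == TM_ForcedByUser);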

/// Set the string \p MDString into the loop metadata, keeping other values
/// intact. If the string is already present in the loop metadata, update its
/// value if it differs.
void addStringMetadataToLoop(Loop *TheLoop, const char *MDString,
                             unsigned V = 0);

/// Returns true if \p Name is applied to \p TheLoop and enabled.
bool getBooleanLoopAttribute(const Loop *TheLoop, StringRef Name);

/// Returns a loop's estimated trip count based on branch weight metadata.
/// In addition, if \p EstimatedLoopInvocationWeight is not null, it is
/// initialized with the weight of the loop's latch branch leading to the exit.
/// Returns 0 when the count is estimated to be 0, or None when a meaningful
/// estimate cannot be made.
Optional<unsigned>
getLoopEstimatedTripCount(Loop *L,
                          unsigned *EstimatedLoopInvocationWeight = nullptr);

/// Set a loop's branch weight metadata to reflect that the loop has \p
/// EstimatedTripCount iterations and \p EstimatedLoopInvocationWeight exits
/// through its latch. Returns true if the metadata is successfully updated,
/// false otherwise. Note that the loop must have a latch block that controls
/// the loop exit in order for this to succeed.
bool setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount,
                               unsigned EstimatedLoopInvocationWeight);
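
// Example (illustrative sketch): scaling the estimated trip count after
// unrolling a loop by a factor of two, assuming L carries branch weights.
//
//   unsigned InvocationWeight = 0;
//   if (Optional<unsigned> TC = getLoopEstimatedTripCount(L, &InvocationWeight))
//     setLoopEstimatedTripCount(L, *TC / 2, InvocationWeight);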

/// Check whether the backedge count of the inner loop \p L is known to be
/// invariant on all iterations of its outer loop. If the loop has no parent,
/// this is trivially true.
bool hasIterationCountInvariantInParent(Loop *L, ScalarEvolution &SE);

/// Helper to consistently add the set of standard passes to a loop pass's \c
/// AnalysisUsage.
///
/// All loop passes should call this as part of implementing their \c
/// getAnalysisUsage.
void getLoopAnalysisUsage(AnalysisUsage &AU);
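
// Example (illustrative sketch): a hypothetical legacy loop pass declaring the
// standard loop analyses.
//
//   void MyLegacyLoopPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     getLoopAnalysisUsage(AU);
//   }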

/// Returns true if it is legal to hoist or sink this instruction disregarding
/// the possible introduction of faults. Reasoning about potential faulting
/// instructions is the responsibility of the caller since it is challenging to
/// do efficiently from within this routine.
/// \p TargetExecutesOncePerLoop is true only when it is guaranteed that the
/// target executes at most once per execution of the loop body. This is used
/// to assess the legality of duplicating atomic loads. Generally, this is
/// true when moving out of a loop and not true when moving into a loop.
/// If \p ORE is set, use it to emit optimization remarks.
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                        Loop *CurLoop, AliasSetTracker *CurAST,
                        MemorySSAUpdater *MSSAU, bool TargetExecutesOncePerLoop,
                        SinkAndHoistLICMFlags *LICMFlags = nullptr,
                        OptimizationRemarkEmitter *ORE = nullptr);

/// Returns a Min/Max operation corresponding to the min/max recurrence kind
/// \p RK. The Builder's fast-math-flags must be set to propagate the expected
/// values.
Value *createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left,
                      Value *Right);

/// Generates an ordered vector reduction using extracts to reduce the value.
Value *getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src,
                           unsigned Op, RecurKind MinMaxKind = RecurKind::None,
                           ArrayRef<Value *> RedOps = None);

/// Generates a vector reduction using shufflevectors to reduce the value.
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *getShuffleReduction(IRBuilderBase &Builder, Value *Src, unsigned Op,
                           RecurKind MinMaxKind = RecurKind::None,
                           ArrayRef<Value *> RedOps = None);

/// Create a target reduction of the given vector. The reduction operation,
/// including min/max kinds, is described by the \p RdxKind parameter.
/// The target is queried to determine if intrinsics or shuffle sequences are
/// required to implement the reduction.
/// Fast-math-flags are propagated using the IRBuilder's setting.
Value *createSimpleTargetReduction(IRBuilderBase &B,
                                   const TargetTransformInfo *TTI, Value *Src,
                                   RecurKind RdxKind,
                                   ArrayRef<Value *> RedOps = None);
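
// Example (illustrative sketch): reducing a vector of integers to a scalar
// sum, assuming an IRBuilder B at the insertion point and a
// TargetTransformInfo *TTI.
//
//   Value *Sum = createSimpleTargetReduction(B, TTI, VecVal, RecurKind::Add);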

/// Create a generic target reduction using a recurrence descriptor \p Desc.
/// The target is queried to determine if intrinsics or shuffle sequences are
/// required to implement the reduction.
/// Fast-math-flags are propagated using the RecurrenceDescriptor.
Value *createTargetReduction(IRBuilderBase &B, const TargetTransformInfo *TTI,
                             RecurrenceDescriptor &Desc, Value *Src);

/// Create an ordered reduction intrinsic using the given recurrence
/// descriptor \p Desc.
Value *createOrderedReduction(IRBuilderBase &B, RecurrenceDescriptor &Desc,
                              Value *Src, Value *Start);

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// If OpValue is non-null, we only consider operations similar to OpValue
/// when intersecting.
/// Flag set: NSW, NUW, exact, and all of fast-math.
void propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue = nullptr);

/// Returns true if we can prove that \p S is defined and always negative in
/// loop \p L.
bool isKnownNegativeInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE);

/// Returns true if we can prove that \p S is defined and always non-negative in
/// loop \p L.
bool isKnownNonNegativeInLoop(const SCEV *S, const Loop *L,
                              ScalarEvolution &SE);

/// Returns true if \p S is defined and is never equal to the signed/unsigned
/// maximum value.
bool cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                       bool Signed);

/// Returns true if \p S is defined and is never equal to the signed/unsigned
/// minimum value.
bool cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                       bool Signed);

enum ReplaceExitVal { NeverRepl, OnlyCheapRepl, NoHardUse, AlwaysRepl };

/// If the final value of any expression that is recurrent in the loop can
/// be computed, substitute the exit values from the loop into any instructions
/// outside of the loop that use the final values of the current expressions.
/// Returns the number of loop exit values that have been replaced; the
/// corresponding PHI nodes are added to \p DeadInsts.
int rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI,
                          ScalarEvolution *SE, const TargetTransformInfo *TTI,
                          SCEVExpander &Rewriter, DominatorTree *DT,
                          ReplaceExitVal ReplaceExitValue,
                          SmallVector<WeakTrackingVH, 16> &DeadInsts);
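
// Example (illustrative sketch): replacing computable exit values, assuming
// the usual analyses and a SCEVExpander named Rewriter are available.
//
//   SmallVector<WeakTrackingVH, 16> DeadInsts;
//   int NumReplaced = rewriteLoopExitValues(L, LI, TLI, SE, TTI, Rewriter, DT,
//                                           OnlyCheapRepl, DeadInsts);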

/// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for
/// \p OrigLoop and the following distribution of \p OrigLoop iterations among
/// \p UnrolledLoop and \p RemainderLoop: \p UnrolledLoop receives weights that
/// reflect TC/UF iterations, and \p RemainderLoop receives weights that reflect
/// the remaining TC%UF iterations.
///
/// Note that \p OrigLoop may be equal to either \p UnrolledLoop or \p
/// RemainderLoop, in which case weights for \p OrigLoop are updated accordingly.
/// Note also that behavior is undefined if \p UnrolledLoop and \p RemainderLoop
/// are equal. \p UF must be greater than zero.
/// If \p OrigLoop has no associated profile info, nothing happens.
///
/// This utility is useful for optimizations such as unrolling and
/// vectorization, for which this is a typical transformation.
void setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop,
                                  Loop *RemainderLoop, uint64_t UF);

/// Utility that implements appending of loops onto a worklist given a range.
/// We want to process loops in postorder, but the worklist is a LIFO data
/// structure, so we append to it in *reverse* postorder.
/// For trees, a preorder traversal is a viable reverse postorder, so we
/// actually append using a preorder walk algorithm.
template <typename RangeT>
void appendLoopsToWorklist(RangeT &&, SmallPriorityWorklist<Loop *, 4> &);
/// Utility that implements appending of loops onto a worklist given a range.
/// It has the same behavior as appendLoopsToWorklist, but assumes the range of
/// loops has already been reversed, so it processes loops in the given order.
template <typename RangeT>
void appendReversedLoopsToWorklist(RangeT &&,
                                   SmallPriorityWorklist<Loop *, 4> &);

/// Utility that implements appending of loops onto a worklist given LoopInfo.
/// Calls the templated utility taking a Range of loops, handing it the Loops
/// in LoopInfo, iterated in reverse. This is because the loops are stored in
/// RPO w.r.t. the control flow graph in LoopInfo. For the purpose of unrolling,
/// loop deletion, and LICM, we largely want to work forward across the CFG so
/// that we visit defs before uses and can propagate simplifications from one
/// loop nest into the next. Calls appendReversedLoopsToWorklist with the
/// already reversed loops in LI.
/// FIXME: Consider changing the order in LoopInfo.
void appendLoopsToWorklist(LoopInfo &, SmallPriorityWorklist<Loop *, 4> &);
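
// Example (illustrative sketch): seeding a worklist with every loop in a
// function and draining it in the intended order; processLoop is hypothetical.
//
//   SmallPriorityWorklist<Loop *, 4> Worklist;
//   appendLoopsToWorklist(LI, Worklist);
//   while (!Worklist.empty())
//     processLoop(*Worklist.pop_back_val());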

/// Recursively clone the specified loop and all of its children,
/// mapping the blocks with the specified map.
Loop *cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM,
                LoopInfo *LI, LPPassManager *LPM);

/// Add code that checks at runtime if the accessed arrays in \p PointerChecks
/// overlap.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in a possibly multi-instruction sequence, and the
/// second element is the final comparator value, or nullptr if no check is
/// needed.
std::pair<Instruction *, Instruction *>
addRuntimeChecks(Instruction *Loc, Loop *TheLoop,
                 const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
                 SCEVExpander &Expander);

/// Struct to hold information about a partially invariant condition.
struct IVConditionInfo {
  /// Instructions that need to be duplicated and checked for the unswitching
  /// condition.
  SmallVector<Instruction *> InstToDuplicate;

  /// Constant to indicate for which value the condition is invariant.
  Constant *KnownValue = nullptr;

  /// True if the partially invariant path is a no-op (i.e. it has no
  /// side effects and no loop value is used outside the loop).
  bool PathIsNoop = true;

  /// If the partially invariant path reaches a single exit block, ExitForPath
  /// is set to that block. Otherwise it is nullptr.
  BasicBlock *ExitForPath = nullptr;
};

/// Check if the loop header has a conditional branch that is not
/// loop-invariant, because it involves load instructions. If all paths from
/// either the true or false successor to the header or to the loop exits do
/// not modify the memory feeding the condition, perform 'partial unswitching'.
/// That is, duplicate the instructions feeding the condition in the
/// pre-header, then unswitch on the duplicated condition. The condition is now
/// known in the unswitched version for the 'invariant' path through the
/// original loop.
///
/// If the branch condition of the header is partially invariant, return a pair
/// containing the instructions to duplicate and a boolean Constant to update
/// the condition in the loops created for the true or false successors.
Optional<IVConditionInfo> hasPartialIVCondition(Loop &L, unsigned MSSAThreshold,
                                                MemorySSA &MSSA, AAResults &AA);
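
// Example (illustrative sketch): querying for a partially invariant condition
// before attempting partial unswitching; MSSAThreshold is a pass-defined limit
// on MemorySSA walking and unswitchPartially is hypothetical.
//
//   if (Optional<IVConditionInfo> Info =
//           hasPartialIVCondition(*L, MSSAThreshold, *MSSA, *AA))
//     unswitchPartially(*L, *Info);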

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_LOOPUTILS_H