1 | //===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===// |
---|---|

2 | // |

3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |

4 | // See https://llvm.org/LICENSE.txt for license information. |

5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |

6 | // |

7 | //===----------------------------------------------------------------------===// |

8 | // |

9 | // This file contains routines that help analyze properties that chains of |

10 | // computations have. |

11 | // |

12 | //===----------------------------------------------------------------------===// |

13 | |

14 | #ifndef LLVM_ANALYSIS_VALUETRACKING_H |

15 | #define LLVM_ANALYSIS_VALUETRACKING_H |

16 | |

17 | #include "llvm/ADT/ArrayRef.h" |

18 | #include "llvm/ADT/Optional.h" |

19 | #include "llvm/ADT/SmallSet.h" |

20 | #include "llvm/IR/Constants.h" |

21 | #include "llvm/IR/DataLayout.h" |

22 | #include "llvm/IR/InstrTypes.h" |

23 | #include "llvm/IR/Intrinsics.h" |

24 | #include "llvm/IR/Operator.h" |

25 | #include <cassert> |

26 | #include <cstdint> |

27 | |

28 | namespace llvm { |

29 | |

30 | class AddOperator; |

31 | class AllocaInst; |

32 | class APInt; |

33 | class AssumptionCache; |

34 | class DominatorTree; |

35 | class GEPOperator; |

36 | class IntrinsicInst; |

37 | class LoadInst; |

38 | class WithOverflowInst; |

39 | struct KnownBits; |

40 | class Loop; |

41 | class LoopInfo; |

42 | class MDNode; |

43 | class OptimizationRemarkEmitter; |

44 | class StringRef; |

45 | class TargetLibraryInfo; |

46 | class Value; |

47 | |

48 | constexpr unsigned MaxAnalysisRecursionDepth = 6; |

49 | |

50 | /// Determine which bits of V are known to be either zero or one and return |

51 | /// them in the KnownZero/KnownOne bit sets. |

52 | /// |

53 | /// This function is defined on values with integer type, values with pointer |

54 | /// type, and vectors of integers. In the case |

55 | /// where V is a vector, the known zero and known one values are the |

56 | /// same width as the vector element, and the bit is set only if it is true |

57 | /// for all of the elements in the vector. |

58 | void computeKnownBits(const Value *V, KnownBits &Known, |

59 | const DataLayout &DL, unsigned Depth = 0, |

60 | AssumptionCache *AC = nullptr, |

61 | const Instruction *CxtI = nullptr, |

62 | const DominatorTree *DT = nullptr, |

63 | OptimizationRemarkEmitter *ORE = nullptr, |

64 | bool UseInstrInfo = true); |

65 | |

66 | /// Determine which bits of V are known to be either zero or one and return |

67 | /// them in the KnownZero/KnownOne bit sets. |

68 | /// |

69 | /// This function is defined on values with integer type, values with pointer |

70 | /// type, and vectors of integers. In the case |

71 | /// where V is a vector, the known zero and known one values are the |

72 | /// same width as the vector element, and the bit is set only if it is true |

73 | /// for all of the demanded elements in the vector. |

74 | void computeKnownBits(const Value *V, const APInt &DemandedElts, |

75 | KnownBits &Known, const DataLayout &DL, |

76 | unsigned Depth = 0, AssumptionCache *AC = nullptr, |

77 | const Instruction *CxtI = nullptr, |

78 | const DominatorTree *DT = nullptr, |

79 | OptimizationRemarkEmitter *ORE = nullptr, |

80 | bool UseInstrInfo = true); |

81 | |

82 | /// Returns the known bits rather than passing by reference. |

83 | KnownBits computeKnownBits(const Value *V, const DataLayout &DL, |

84 | unsigned Depth = 0, AssumptionCache *AC = nullptr, |

85 | const Instruction *CxtI = nullptr, |

86 | const DominatorTree *DT = nullptr, |

87 | OptimizationRemarkEmitter *ORE = nullptr, |

88 | bool UseInstrInfo = true); |

89 | |

90 | /// Returns the known bits rather than passing by reference. |

91 | KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts, |

92 | const DataLayout &DL, unsigned Depth = 0, |

93 | AssumptionCache *AC = nullptr, |

94 | const Instruction *CxtI = nullptr, |

95 | const DominatorTree *DT = nullptr, |

96 | OptimizationRemarkEmitter *ORE = nullptr, |

97 | bool UseInstrInfo = true); |

98 | |

99 | /// Compute known bits from the range metadata. |

100 | /// \p KnownZero the set of bits that are known to be zero |

101 | /// \p KnownOne the set of bits that are known to be one |

102 | void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, |

103 | KnownBits &Known); |

104 | |

105 | /// Return true if LHS and RHS have no common bits set. |

106 | bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS, |

107 | const DataLayout &DL, |

108 | AssumptionCache *AC = nullptr, |

109 | const Instruction *CxtI = nullptr, |

110 | const DominatorTree *DT = nullptr, |

111 | bool UseInstrInfo = true); |

112 | |

113 | /// Return true if the given value is known to have exactly one bit set when |

114 | /// defined. For vectors return true if every element is known to be a power |

115 | /// of two when defined. Supports values with integer or pointer type and |

116 | /// vectors of integers. If 'OrZero' is set, then return true if the given |

117 | /// value is either a power of two or zero. |

118 | bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, |

119 | bool OrZero = false, unsigned Depth = 0, |

120 | AssumptionCache *AC = nullptr, |

121 | const Instruction *CxtI = nullptr, |

122 | const DominatorTree *DT = nullptr, |

123 | bool UseInstrInfo = true); |

124 | |

125 | bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI); |

126 | |

127 | /// Return true if the given value is known to be non-zero when defined. For |

128 | /// vectors, return true if every element is known to be non-zero when |

129 | /// defined. For pointers, if the context instruction and dominator tree are |

130 | /// specified, perform context-sensitive analysis and return true if the |

131 | /// pointer couldn't possibly be null at the specified instruction. |

132 | /// Supports values with integer or pointer type and vectors of integers. |

133 | bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0, |

134 | AssumptionCache *AC = nullptr, |

135 | const Instruction *CxtI = nullptr, |

136 | const DominatorTree *DT = nullptr, |

137 | bool UseInstrInfo = true); |

138 | |

139 | /// Return true if the two given values are negations of each other. |

140 | /// Currently it can recognize these Value pairs: |

141 | /// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X) |

142 | /// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A) |

143 | bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false); |

144 | |

145 | /// Returns true if the given value is known to be non-negative. |

146 | bool isKnownNonNegative(const Value *V, const DataLayout &DL, |

147 | unsigned Depth = 0, |

148 | AssumptionCache *AC = nullptr, |

149 | const Instruction *CxtI = nullptr, |

150 | const DominatorTree *DT = nullptr, |

151 | bool UseInstrInfo = true); |

152 | |

153 | /// Returns true if the given value is known be positive (i.e. non-negative |

154 | /// and non-zero). |

155 | bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0, |

156 | AssumptionCache *AC = nullptr, |

157 | const Instruction *CxtI = nullptr, |

158 | const DominatorTree *DT = nullptr, |

159 | bool UseInstrInfo = true); |

160 | |

161 | /// Returns true if the given value is known be negative (i.e. non-positive |

162 | /// and non-zero). |

163 | bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0, |

164 | AssumptionCache *AC = nullptr, |

165 | const Instruction *CxtI = nullptr, |

166 | const DominatorTree *DT = nullptr, |

167 | bool UseInstrInfo = true); |

168 | |

169 | /// Return true if the given values are known to be non-equal when defined. |

170 | /// Supports scalar integer types only. |

171 | bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, |

172 | AssumptionCache *AC = nullptr, |

173 | const Instruction *CxtI = nullptr, |

174 | const DominatorTree *DT = nullptr, |

175 | bool UseInstrInfo = true); |

176 | |

177 | /// Return true if 'V & Mask' is known to be zero. We use this predicate to |

178 | /// simplify operations downstream. Mask is known to be zero for bits that V |

179 | /// cannot have. |

180 | /// |

181 | /// This function is defined on values with integer type, values with pointer |

182 | /// type, and vectors of integers. In the case |

183 | /// where V is a vector, the mask, known zero, and known one values are the |

184 | /// same width as the vector element, and the bit is set only if it is true |

185 | /// for all of the elements in the vector. |

186 | bool MaskedValueIsZero(const Value *V, const APInt &Mask, |

187 | const DataLayout &DL, |

188 | unsigned Depth = 0, AssumptionCache *AC = nullptr, |

189 | const Instruction *CxtI = nullptr, |

190 | const DominatorTree *DT = nullptr, |

191 | bool UseInstrInfo = true); |

192 | |

193 | /// Return the number of times the sign bit of the register is replicated into |

194 | /// the other bits. We know that at least 1 bit is always equal to the sign |

195 | /// bit (itself), but other cases can give us information. For example, |

196 | /// immediately after an "ashr X, 2", we know that the top 3 bits are all |

197 | /// equal to each other, so we return 3. For vectors, return the number of |

198 | /// sign bits for the vector element with the minimum number of known sign |

199 | /// bits. |

200 | unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, |

201 | unsigned Depth = 0, AssumptionCache *AC = nullptr, |

202 | const Instruction *CxtI = nullptr, |

203 | const DominatorTree *DT = nullptr, |

204 | bool UseInstrInfo = true); |

205 | |

206 | /// This function computes the integer multiple of Base that equals V. If |

207 | /// successful, it returns true and returns the multiple in Multiple. If |

208 | /// unsuccessful, it returns false. Also, if V can be simplified to an |

209 | /// integer, then the simplified V is returned in Val. Look through sext only |

210 | /// if LookThroughSExt=true. |

211 | bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, |

212 | bool LookThroughSExt = false, |

213 | unsigned Depth = 0); |

214 | |

215 | /// Map a call instruction to an intrinsic ID. Libcalls which have equivalent |

216 | /// intrinsics are treated as-if they were intrinsics. |

217 | Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, |

218 | const TargetLibraryInfo *TLI); |

219 | |

220 | /// Return true if we can prove that the specified FP value is never equal to |

221 | /// -0.0. |

222 | bool CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, |

223 | unsigned Depth = 0); |

224 | |

225 | /// Return true if we can prove that the specified FP value is either NaN or |

226 | /// never less than -0.0. |

227 | /// |

228 | /// NaN --> true |

229 | /// +0 --> true |

230 | /// -0 --> true |

231 | /// x > +0 --> true |

232 | /// x < -0 --> false |

233 | bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI); |

234 | |

235 | /// Return true if the floating-point scalar value is not an infinity or if |

236 | /// the floating-point vector value has no infinities. Return false if a value |

237 | /// could ever be infinity. |

238 | bool isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, |

239 | unsigned Depth = 0); |

240 | |

241 | /// Return true if the floating-point scalar value is not a NaN or if the |

242 | /// floating-point vector value has no NaN elements. Return false if a value |

243 | /// could ever be NaN. |

244 | bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, |

245 | unsigned Depth = 0); |

246 | |

247 | /// Return true if we can prove that the specified FP value's sign bit is 0. |

248 | /// |

249 | /// NaN --> true/false (depending on the NaN's sign bit) |

250 | /// +0 --> true |

251 | /// -0 --> false |

252 | /// x > +0 --> true |

253 | /// x < -0 --> false |

254 | bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI); |

255 | |

256 | /// If the specified value can be set by repeating the same byte in memory, |

257 | /// return the i8 value that it is represented with. This is true for all i8 |

258 | /// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double |

259 | /// 0.0 etc. If the value can't be handled with a repeated byte store (e.g. |

260 | /// i16 0x1234), return null. If the value is entirely undef and padding, |

261 | /// return undef. |

262 | Value *isBytewiseValue(Value *V, const DataLayout &DL); |

263 | |

264 | /// Given an aggregate and a sequence of indices, see if the scalar value |

265 | /// indexed is already around as a register, for example if it were inserted |

266 | /// directly into the aggregate. |

267 | /// |

268 | /// If InsertBefore is not null, this function will duplicate (modified) |

269 | /// insertvalues when a part of a nested struct is extracted. |

270 | Value *FindInsertedValue(Value *V, |

271 | ArrayRef<unsigned> idx_range, |

272 | Instruction *InsertBefore = nullptr); |

273 | |

274 | /// Analyze the specified pointer to see if it can be expressed as a base |

275 | /// pointer plus a constant offset. Return the base and offset to the caller. |

276 | /// |

277 | /// This is a wrapper around Value::stripAndAccumulateConstantOffsets that |

278 | /// creates and later unpacks the required APInt. |

279 | inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, |

280 | const DataLayout &DL, |

281 | bool AllowNonInbounds = true) { |

282 | APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0); |

283 | Value *Base = |

284 | Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds); |

285 | |

286 | Offset = OffsetAPInt.getSExtValue(); |

287 | return Base; |

288 | } |

289 | inline const Value * |

290 | GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset, |

291 | const DataLayout &DL, |

292 | bool AllowNonInbounds = true) { |

293 | return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL, |

294 | AllowNonInbounds); |

295 | } |

296 | |

297 | /// Returns true if the GEP is based on a pointer to a string (array of |

298 | /// \p CharSize integers) and is indexing into this string. |

299 | bool isGEPBasedOnPointerToString(const GEPOperator *GEP, |

300 | unsigned CharSize = 8); |

301 | |

302 | /// Represents offset+length into a ConstantDataArray. |

303 | struct ConstantDataArraySlice { |

304 | /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid |

305 | /// initializer, it just doesn't fit the ConstantDataArray interface). |

306 | const ConstantDataArray *Array; |

307 | |

308 | /// Slice starts at this Offset. |

309 | uint64_t Offset; |

310 | |

311 | /// Length of the slice. |

312 | uint64_t Length; |

313 | |

314 | /// Moves the Offset and adjusts Length accordingly. |

315 | void move(uint64_t Delta) { |

316 | assert(Delta < Length); |

317 | Offset += Delta; |

318 | Length -= Delta; |

319 | } |

320 | |

321 | /// Convenience accessor for elements in the slice. |

322 | uint64_t operator[](unsigned I) const { |

323 | return Array==nullptr ? 0 : Array->getElementAsInteger(I + Offset); |

324 | } |

325 | }; |

326 | |

327 | /// Returns true if the value \p V is a pointer into a ConstantDataArray. |

328 | /// If successful \p Slice will point to a ConstantDataArray info object |

329 | /// with an appropriate offset. |

330 | bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, |

331 | unsigned ElementSize, uint64_t Offset = 0); |

332 | |

333 | /// This function computes the length of a null-terminated C string pointed to |

334 | /// by V. If successful, it returns true and returns the string in Str. If |

335 | /// unsuccessful, it returns false. This does not include the trailing null |

336 | /// character by default. If TrimAtNul is set to false, then this returns any |

337 | /// trailing null characters as well as any other characters that come after |

338 | /// it. |

339 | bool getConstantStringInfo(const Value *V, StringRef &Str, |

340 | uint64_t Offset = 0, bool TrimAtNul = true); |

341 | |

342 | /// If we can compute the length of the string pointed to by the specified |

343 | /// pointer, return 'len+1'. If we can't, return 0. |

344 | uint64_t GetStringLength(const Value *V, unsigned CharSize = 8); |

345 | |

346 | /// This function returns call pointer argument that is considered the same by |

347 | /// aliasing rules. You CAN'T use it to replace one value with another. If |

348 | /// \p MustPreserveNullness is true, the call must preserve the nullness of |

349 | /// the pointer. |

350 | const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call, |

351 | bool MustPreserveNullness); |

352 | inline Value * |

353 | getArgumentAliasingToReturnedPointer(CallBase *Call, |

354 | bool MustPreserveNullness) { |

355 | return const_cast<Value *>(getArgumentAliasingToReturnedPointer( |

356 | const_cast<const CallBase *>(Call), MustPreserveNullness)); |

357 | } |

358 | |

359 | /// {launder,strip}.invariant.group returns pointer that aliases its argument, |

360 | /// and it only captures pointer by returning it. |

361 | /// These intrinsics are not marked as nocapture, because returning is |

362 | /// considered as capture. The arguments are not marked as returned either, |

363 | /// because it would make it useless. If \p MustPreserveNullness is true, |

364 | /// the intrinsic must preserve the nullness of the pointer. |

365 | bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing( |

366 | const CallBase *Call, bool MustPreserveNullness); |

367 | |

368 | /// This method strips off any GEP address adjustments and pointer casts from |

369 | /// the specified value, returning the original object being addressed. Note |

370 | /// that the returned value has pointer type if the specified value does. If |

371 | /// the MaxLookup value is non-zero, it limits the number of instructions to |

372 | /// be stripped off. |

373 | const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6); |

374 | inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) { |

375 | // Force const to avoid infinite recursion. |

376 | const Value *VConst = V; |

377 | return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup)); |

378 | } |

379 | |

380 | /// This method is similar to getUnderlyingObject except that it can |

381 | /// look through phi and select instructions and return multiple objects. |

382 | /// |

383 | /// If LoopInfo is passed, loop phis are further analyzed. If a pointer |

384 | /// accesses different objects in each iteration, we don't look through the |

385 | /// phi node. E.g. consider this loop nest: |

386 | /// |

387 | /// int **A; |

388 | /// for (i) |

389 | /// for (j) { |

390 | /// A[i][j] = A[i-1][j] * B[j] |

391 | /// } |

392 | /// |

393 | /// This is transformed by Load-PRE to stash away A[i] for the next iteration |

394 | /// of the outer loop: |

395 | /// |

396 | /// Curr = A[0]; // Prev_0 |

397 | /// for (i: 1..N) { |

398 | /// Prev = Curr; // Prev = PHI (Prev_0, Curr) |

399 | /// Curr = A[i]; |

400 | /// for (j: 0..N) { |

401 | /// Curr[j] = Prev[j] * B[j] |

402 | /// } |

403 | /// } |

404 | /// |

405 | /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects |

406 | /// should not assume that Curr and Prev share the same underlying object thus |

407 | /// it shouldn't look through the phi above. |

408 | void getUnderlyingObjects(const Value *V, |

409 | SmallVectorImpl<const Value *> &Objects, |

410 | LoopInfo *LI = nullptr, unsigned MaxLookup = 6); |

411 | |

412 | /// This is a wrapper around getUnderlyingObjects and adds support for basic |

413 | /// ptrtoint+arithmetic+inttoptr sequences. |

414 | bool getUnderlyingObjectsForCodeGen(const Value *V, |

415 | SmallVectorImpl<Value *> &Objects); |

416 | |

417 | /// Returns unique alloca where the value comes from, or nullptr. |

418 | /// If OffsetZero is true, check that V points to the beginning of the alloca. |

419 | AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false); |

420 | inline const AllocaInst *findAllocaForValue(const Value *V, |

421 | bool OffsetZero = false) { |

422 | return findAllocaForValue(const_cast<Value *>(V), OffsetZero); |

423 | } |

424 | |

425 | /// Return true if the only users of this pointer are lifetime markers. |

426 | bool onlyUsedByLifetimeMarkers(const Value *V); |

427 | |

428 | /// Return true if the only users of this pointer are lifetime markers or |

429 | /// droppable instructions. |

430 | bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V); |

431 | |

432 | /// Return true if speculation of the given load must be suppressed to avoid |

433 | /// ordering or interfering with an active sanitizer. If not suppressed, |

434 | /// dereferenceability and alignment must be proven separately. Note: This |

435 | /// is only needed for raw reasoning; if you use the interface below |

436 | /// (isSafeToSpeculativelyExecute), this is handled internally. |

437 | bool mustSuppressSpeculation(const LoadInst &LI); |

438 | |

439 | /// Return true if the instruction does not have any effects besides |

440 | /// calculating the result and does not have undefined behavior. |

441 | /// |

442 | /// This method never returns true for an instruction that returns true for |

443 | /// mayHaveSideEffects; however, this method also does some other checks in |

444 | /// addition. It checks for undefined behavior, like dividing by zero or |

445 | /// loading from an invalid pointer (but not for undefined results, like a |

446 | /// shift with a shift amount larger than the width of the result). It checks |

447 | /// for malloc and alloca because speculatively executing them might cause a |

448 | /// memory leak. It also returns false for instructions related to control |

449 | /// flow, specifically terminators and PHI nodes. |

450 | /// |

451 | /// If the CtxI is specified this method performs context-sensitive analysis |

452 | /// and returns true if it is safe to execute the instruction immediately |

453 | /// before the CtxI. |

454 | /// |

455 | /// If the CtxI is NOT specified this method only looks at the instruction |

456 | /// itself and its operands, so if this method returns true, it is safe to |

457 | /// move the instruction as long as the correct dominance relationships for |

458 | /// the operands and users hold. |

459 | /// |

460 | /// This method can return true for instructions that read memory; |

461 | /// for such instructions, moving them may change the resulting value. |

462 | bool isSafeToSpeculativelyExecute(const Value *V, |

463 | const Instruction *CtxI = nullptr, |

464 | const DominatorTree *DT = nullptr, |

465 | const TargetLibraryInfo *TLI = nullptr); |

466 | |

467 | /// Returns true if the result or effects of the given instruction \p I |

468 | /// depend on or influence global memory. |

469 | /// Memory dependence arises for example if the instruction reads from |

470 | /// memory or may produce effects or undefined behaviour. Memory dependent |

471 | /// instructions generally cannot be reordered with respect to other memory |

472 | /// dependent instructions or moved into non-dominated basic blocks. |

473 | /// Instructions which just compute a value based on the values of their |

474 | /// operands are not memory dependent. |

475 | bool mayBeMemoryDependent(const Instruction &I); |

476 | |

477 | /// Return true if it is an intrinsic that cannot be speculated but also |

478 | /// cannot trap. |

479 | bool isAssumeLikeIntrinsic(const Instruction *I); |

480 | |

481 | /// Return true if it is valid to use the assumptions provided by an |

482 | /// assume intrinsic, I, at the point in the control-flow identified by the |

483 | /// context instruction, CxtI. |

484 | bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, |

485 | const DominatorTree *DT = nullptr); |

486 | |

/// Possible outcomes when checking whether an arithmetic operation can
/// overflow (see the computeOverflowFor* helpers below).
enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

497 | |

498 | OverflowResult computeOverflowForUnsignedMul(const Value *LHS, |

499 | const Value *RHS, |

500 | const DataLayout &DL, |

501 | AssumptionCache *AC, |

502 | const Instruction *CxtI, |

503 | const DominatorTree *DT, |

504 | bool UseInstrInfo = true); |

505 | OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, |

506 | const DataLayout &DL, |

507 | AssumptionCache *AC, |

508 | const Instruction *CxtI, |

509 | const DominatorTree *DT, |

510 | bool UseInstrInfo = true); |

511 | OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, |

512 | const Value *RHS, |

513 | const DataLayout &DL, |

514 | AssumptionCache *AC, |

515 | const Instruction *CxtI, |

516 | const DominatorTree *DT, |

517 | bool UseInstrInfo = true); |

518 | OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS, |

519 | const DataLayout &DL, |

520 | AssumptionCache *AC = nullptr, |

521 | const Instruction *CxtI = nullptr, |

522 | const DominatorTree *DT = nullptr); |

523 | /// This version also leverages the sign bit of Add if known. |

524 | OverflowResult computeOverflowForSignedAdd(const AddOperator *Add, |

525 | const DataLayout &DL, |

526 | AssumptionCache *AC = nullptr, |

527 | const Instruction *CxtI = nullptr, |

528 | const DominatorTree *DT = nullptr); |

529 | OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, |

530 | const DataLayout &DL, |

531 | AssumptionCache *AC, |

532 | const Instruction *CxtI, |

533 | const DominatorTree *DT); |

534 | OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, |

535 | const DataLayout &DL, |

536 | AssumptionCache *AC, |

537 | const Instruction *CxtI, |

538 | const DominatorTree *DT); |

539 | |

540 | /// Returns true if the arithmetic part of the \p WO 's result is |

541 | /// used only along the paths control dependent on the computation |

542 | /// not overflowing, \p WO being an <op>.with.overflow intrinsic. |

543 | bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, |

544 | const DominatorTree &DT); |

545 | |

546 | |

547 | /// Determine the possible constant range of an integer or vector of integer |

548 | /// value. This is intended as a cheap, non-recursive check. |

549 | ConstantRange computeConstantRange(const Value *V, bool UseInstrInfo = true, |

550 | AssumptionCache *AC = nullptr, |

551 | const Instruction *CtxI = nullptr, |

552 | unsigned Depth = 0); |

553 | |

554 | /// Return true if this function can prove that the instruction I will |

555 | /// always transfer execution to one of its successors (including the next |

556 | /// instruction that follows within a basic block). E.g. this is not |

557 | /// guaranteed for function calls that could loop infinitely. |

558 | /// |

559 | /// In other words, this function returns false for instructions that may |

560 | /// transfer execution or fail to transfer execution in a way that is not |

561 | /// captured in the CFG nor in the sequence of instructions within a basic |

562 | /// block. |

563 | /// |

564 | /// Undefined behavior is assumed not to happen, so e.g. division is |

565 | /// guaranteed to transfer execution to the following instruction even |

566 | /// though division by zero might cause undefined behavior. |

567 | bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I); |

568 | |

569 | /// Returns true if this block does not contain a potential implicit exit. |

570 | /// This is equivalent to saying that all instructions within the basic block |

571 | /// are guaranteed to transfer execution to their successor within the basic |

572 | /// block. This has the same assumptions w.r.t. undefined behavior as the |

573 | /// instruction variant of this function. |

574 | bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB); |

575 | |

576 | /// Return true if this function can prove that the instruction I |

577 | /// is executed for every iteration of the loop L. |

578 | /// |

579 | /// Note that this currently only considers the loop header. |

580 | bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, |

581 | const Loop *L); |

582 | |

583 | /// Return true if I yields poison or raises UB if any of its operands is |

584 | /// poison. |

585 | /// Formally, given I = `r = op v1 v2 .. vN`, propagatesPoison returns true |

586 | /// if, for all i, r is evaluated to poison or op raises UB if vi = poison. |

587 | /// If vi is a vector or an aggregate and r is a single value, any poison |

588 | /// element in vi should make r poison or raise UB. |

589 | /// To filter out operands that raise UB on poison, you can use |

590 | /// getGuaranteedNonPoisonOp. |

591 | bool propagatesPoison(const Operator *I); |

592 | |

593 | /// Insert operands of I into Ops such that I will trigger undefined behavior |

594 | /// if I is executed and that operand has a poison value. |

595 | void getGuaranteedNonPoisonOps(const Instruction *I, |

596 | SmallPtrSetImpl<const Value *> &Ops); |

597 | /// Insert operands of I into Ops such that I will trigger undefined behavior |

598 | /// if I is executed and that operand is not a well-defined value |

599 | /// (i.e. has undef bits or poison). |

600 | void getGuaranteedWellDefinedOps(const Instruction *I, |

601 | SmallPtrSetImpl<const Value *> &Ops); |

602 | |

603 | /// Return true if the given instruction must trigger undefined behavior |

604 | /// when I is executed with any operands which appear in KnownPoison holding |

605 | /// a poison value at the point of execution. |

606 | bool mustTriggerUB(const Instruction *I, |

607 | const SmallSet<const Value *, 16>& KnownPoison); |

608 | |

609 | /// Return true if this function can prove that if Inst is executed |

610 | /// and yields a poison value or undef bits, then that will trigger |

611 | /// undefined behavior. |

612 | /// |

613 | /// Note that this currently only considers the basic block that is |

614 | /// the parent of Inst. |

615 | bool programUndefinedIfUndefOrPoison(const Instruction *Inst); |

616 | bool programUndefinedIfPoison(const Instruction *Inst); |

617 | |

618 | /// canCreateUndefOrPoison returns true if Op can create undef or poison from |

619 | /// non-undef & non-poison operands. |

620 | /// For vectors, canCreateUndefOrPoison returns true if there is potential |

621 | /// poison or undef in any element of the result when vectors without |

622 | /// undef/poison are given as operands. |

623 | /// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns |

624 | /// true. If Op raises immediate UB but never creates poison or undef |

625 | /// (e.g. sdiv I, 0), canCreatePoison returns false. |

626 | /// |

627 | /// canCreatePoison returns true if Op can create poison from non-poison |

628 | /// operands. |

629 | bool canCreateUndefOrPoison(const Operator *Op); |

630 | bool canCreatePoison(const Operator *Op); |

631 | |

632 | /// Return true if V is poison given that ValAssumedPoison is already poison. |

633 | /// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`, |

634 | /// impliesPoison returns true. |

635 | bool impliesPoison(const Value *ValAssumedPoison, const Value *V); |

636 | |

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);
/// Variant of the above; per its name this proves only the absence of poison
/// (undef bits permitted). NOTE(review): confirm against the definition.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

655 | |

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    ///< Signed minimum
  SPF_UMIN,    ///< Unsigned minimum
  SPF_SMAX,    ///< Signed maximum
  SPF_UMAX,    ///< Unsigned maximum
  SPF_FMINNUM, ///< Floating point minnum
  SPF_FMAXNUM, ///< Floating point maxnum
  SPF_ABS,     ///< Absolute value
  SPF_NABS     ///< Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        ///< NaN behavior not applicable.
  SPNB_RETURNS_NAN,   ///< Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, ///< Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    ///< Given one NaN input, can return either (or
                      ///< it has been determined that no operands can
                      ///< be NaN).
};

/// Result of matchSelectPattern: which min/max/abs flavor was matched,
/// plus the NaN and ordering details needed to re-materialize it.
struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; ///< Only applicable if Flavor is
                                        ///< SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; ///< When implementing this min/max pattern as
                ///< fcmp; select, does the fcmp have to be
                ///< ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    // Every flavor other than "unknown" and the two abs variants is a
    // min/max of some kind.
    return !(SPF == SPF_UNKNOWN || SPF == SPF_ABS || SPF == SPF_NABS);
  }
};

693 | |

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

715 | |

716 | inline SelectPatternResult |

717 | matchSelectPattern(const Value *V, const Value *&LHS, const Value *&RHS) { |

718 | Value *L = const_cast<Value *>(LHS); |

719 | Value *R = const_cast<Value *>(RHS); |

720 | auto Result = matchSelectPattern(const_cast<Value *>(V), L, R); |

721 | LHS = L; |

722 | RHS = R; |

723 | return Result; |

724 | } |

725 | |

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
                                 bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

/// Return the inverse minimum/maximum intrinsic ID for \p MinMaxID
/// (intrinsic analogue of getInverseMinMaxFlavor above).
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the canonical inverse comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getInverseMinMaxPred(SelectPatternFlavor SPF);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

754 | |

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant.  In math terms, it can
///   be a free variable.  We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms.  This
///   results in some odd recurrence structures.  Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code.  (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple.  If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                           Value *&Start, Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                           Value *&Start, Value *&Step);

785 | |

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return None if no
/// implication can be made.
/// A & B must be i1 (boolean) values or a vector of such values. Note that
/// the truth table for implication is the same as <=u on i1 values (but not
/// <=s!).  The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);
/// As above, but the right-hand condition is given decomposed as a predicate
/// and its two operands rather than as a single Value.
Optional<bool> isImpliedCondition(const Value *LHS,
                                  CmpInst::Predicate RHSPred,
                                  const Value *RHSOp0, const Value *RHSOp1,
                                  const DataLayout &DL, bool LHSIsTrue = true,
                                  unsigned Depth = 0);

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
Optional<bool> isImpliedByDomCondition(const Value *Cond,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);
/// As above, but the condition is given decomposed as a predicate and its
/// two operands.
Optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                       const Value *LHS, const Value *RHS,
                                       const Instruction *ContextI,
                                       const DataLayout &DL);

/// If Ptr1 is provably equal to Ptr2 plus a constant offset, return that
/// offset. For example, Ptr1 might be &A[42], and Ptr2 might be &A[40]. In
/// this case offset would be -8.
Optional<int64_t> isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                  const DataLayout &DL);

820 | } // end namespace llvm |

821 | |

822 | #endif // LLVM_ANALYSIS_VALUETRACKING_H |

823 |