1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constant.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instruction.h"
31#include "llvm/IR/OperandTraits.h"
32#include "llvm/IR/Use.h"
33#include "llvm/IR/User.h"
34#include "llvm/Support/AtomicOrdering.h"
35#include "llvm/Support/ErrorHandling.h"
36#include <cassert>
37#include <cstddef>
38#include <cstdint>
39#include <iterator>
40#include <optional>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52class UnreachableInst;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
/// An instruction to allocate memory on the stack.
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62 using AlignmentField = AlignmentBitfieldElementT<0>;
63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
66 SwiftErrorField>(),
67 "Bitfields must be contiguous");
68
69protected:
70 // Note: Instruction needs to be a friend here to call cloneImpl.
71 friend class Instruction;
72
73 AllocaInst *cloneImpl() const;
74
75public:
76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
77 const Twine &Name, Instruction *InsertBefore);
78 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, BasicBlock *InsertAtEnd);
80
81 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
82 Instruction *InsertBefore);
83 AllocaInst(Type *Ty, unsigned AddrSpace,
84 const Twine &Name, BasicBlock *InsertAtEnd);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", Instruction *InsertBefore = nullptr);
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name, BasicBlock *InsertAtEnd);
90
91 /// Return true if there is an allocation size parameter to the allocation
92 /// instruction that is not 1.
93 bool isArrayAllocation() const;
94
95 /// Get the number of elements allocated. For a simple allocation of a single
96 /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }
99
100 /// Overload to return most specific pointer type.
101 PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
103 }
104
105 /// Return the address space for the allocation.
106 unsigned getAddressSpace() const {
107 return getType()->getAddressSpace();
108 }
109
110 /// Get allocation size in bytes. Returns std::nullopt if size can't be
111 /// determined, e.g. in case of a VLA.
112 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
113
114 /// Get allocation size in bits. Returns std::nullopt if size can't be
115 /// determined, e.g. in case of a VLA.
116 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
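  // Illustrative sketch (not part of this header): querying the static size of
  // an alloca. `AI` and `DL` are assumed to exist in the caller.
  //
  //   if (std::optional<TypeSize> Size = AI->getAllocationSize(DL))
  //     if (!Size->isScalable())
  //       uint64_t Bytes = Size->getFixedValue(); // constant-sized alloca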
117
118 /// Return the type that is being allocated by the instruction.
119 Type *getAllocatedType() const { return AllocatedType; }
  /// For use only in special circumstances that need to generically
  /// transform a whole instruction (e.g. IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
123
124 /// Return the alignment of the memory that is being allocated by the
125 /// instruction.
126 Align getAlign() const {
127 return Align(1ULL << getSubclassData<AlignmentField>());
128 }
129
130 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
132 }
133
134 /// Return true if this alloca is in the entry block of the function and is a
135 /// constant size. If so, the code generator will fold it into the
136 /// prolog/epilog code, so it is basically free.
137 bool isStaticAlloca() const;
138
139 /// Return true if this alloca is used as an inalloca argument to a call. Such
140 /// allocas are never considered static even if they are in the entry block.
141 bool isUsedWithInAlloca() const {
142 return getSubclassData<UsedWithInAllocaField>();
143 }
144
145 /// Specify whether this alloca is used to represent the arguments to a call.
146 void setUsedWithInAlloca(bool V) {
147 setSubclassData<UsedWithInAllocaField>(V);
148 }
149
150 /// Return true if this alloca is used as a swifterror argument to a call.
151 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
152 /// Specify whether this alloca is used to represent a swifterror.
153 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
154
155 // Methods for support type inquiry through isa, cast, and dyn_cast:
156 static bool classof(const Instruction *I) {
157 return (I->getOpcode() == Instruction::Alloca);
158 }
159 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
161 }
162
163private:
164 // Shadow Instruction::setInstructionSubclassData with a private forwarding
165 // method so that subclasses cannot accidentally use it.
166 template <typename Bitfield>
167 void setSubclassData(typename Bitfield::Type Value) {
168 Instruction::setSubclassData<Bitfield>(Value);
169 }
170};
171
172//===----------------------------------------------------------------------===//
173// LoadInst Class
174//===----------------------------------------------------------------------===//
175
176/// An instruction for reading from memory. This uses the SubclassData field in
177/// Value to store whether or not the load is volatile.
178class LoadInst : public UnaryInstruction {
179 using VolatileField = BoolBitfieldElementT<0>;
180 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
181 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
182 static_assert(
183 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
184 "Bitfields must be contiguous");
185
186 void AssertOK();
187
188protected:
189 // Note: Instruction needs to be a friend here to call cloneImpl.
190 friend class Instruction;
191
192 LoadInst *cloneImpl() const;
193
194public:
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
196 Instruction *InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Instruction *InsertBefore);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, Instruction *InsertBefore = nullptr);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, BasicBlock *InsertAtEnd);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order,
208 SyncScope::ID SSID = SyncScope::System,
209 Instruction *InsertBefore = nullptr);
210 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
211 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
212 BasicBlock *InsertAtEnd);
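  // Illustrative sketch of the atomic-load constructor above (not part of this
  // header); `Int32Ty`, `Ptr`, and `InsertPt` are assumed to exist.
  //
  //   LoadInst *LI = new LoadInst(Int32Ty, Ptr, "val", /*isVolatile=*/false,
  //                               Align(4), AtomicOrdering::Acquire,
  //                               SyncScope::System, InsertPt);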
213
214 /// Return true if this is a load from a volatile memory location.
215 bool isVolatile() const { return getSubclassData<VolatileField>(); }
216
217 /// Specify whether this is a volatile load or not.
218 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
219
220 /// Return the alignment of the access that is being performed.
221 Align getAlign() const {
222 return Align(1ULL << (getSubclassData<AlignmentField>()));
223 }
224
225 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
227 }
228
229 /// Returns the ordering constraint of this load instruction.
230 AtomicOrdering getOrdering() const {
231 return getSubclassData<OrderingField>();
232 }
233 /// Sets the ordering constraint of this load instruction. May not be Release
234 /// or AcquireRelease.
235 void setOrdering(AtomicOrdering Ordering) {
236 setSubclassData<OrderingField>(Ordering);
237 }
238
239 /// Returns the synchronization scope ID of this load instruction.
240 SyncScope::ID getSyncScopeID() const {
241 return SSID;
242 }
243
244 /// Sets the synchronization scope ID of this load instruction.
245 void setSyncScopeID(SyncScope::ID SSID) {
246 this->SSID = SSID;
247 }
248
249 /// Sets the ordering constraint and the synchronization scope ID of this load
250 /// instruction.
251 void setAtomic(AtomicOrdering Ordering,
252 SyncScope::ID SSID = SyncScope::System) {
253 setOrdering(Ordering);
254 setSyncScopeID(SSID);
255 }
256
257 bool isSimple() const { return !isAtomic() && !isVolatile(); }
258
259 bool isUnordered() const {
260 return (getOrdering() == AtomicOrdering::NotAtomic ||
261 getOrdering() == AtomicOrdering::Unordered) &&
262 !isVolatile();
263 }
264
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
267 static unsigned getPointerOperandIndex() { return 0U; }
268 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
269
270 /// Returns the address space of the pointer operand.
271 unsigned getPointerAddressSpace() const {
272 return getPointerOperandType()->getPointerAddressSpace();
273 }
274
275 // Methods for support type inquiry through isa, cast, and dyn_cast:
276 static bool classof(const Instruction *I) {
277 return I->getOpcode() == Instruction::Load;
278 }
279 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
281 }
282
283private:
284 // Shadow Instruction::setInstructionSubclassData with a private forwarding
285 // method so that subclasses cannot accidentally use it.
286 template <typename Bitfield>
287 void setSubclassData(typename Bitfield::Type Value) {
288 Instruction::setSubclassData<Bitfield>(Value);
289 }
290
291 /// The synchronization scope ID of this load instruction. Not quite enough
292 /// room in SubClassData for everything, so synchronization scope ID gets its
293 /// own field.
294 SyncScope::ID SSID;
295};
296
297//===----------------------------------------------------------------------===//
298// StoreInst Class
299//===----------------------------------------------------------------------===//
300
301/// An instruction for storing to memory.
302class StoreInst : public Instruction {
303 using VolatileField = BoolBitfieldElementT<0>;
304 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
305 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
306 static_assert(
307 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
308 "Bitfields must be contiguous");
309
310 void AssertOK();
311
312protected:
313 // Note: Instruction needs to be a friend here to call cloneImpl.
314 friend class Instruction;
315
316 StoreInst *cloneImpl() const;
317
318public:
319 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
320 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
325 BasicBlock::iterator InsertBefore);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 Instruction *InsertBefore = nullptr);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 BasicBlock::iterator InsertBefore);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
334 Instruction *InsertBefore = nullptr);
335 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
336 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
338 AtomicOrdering Order, SyncScope::ID SSID,
339 BasicBlock::iterator InsertBefore);
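  // Illustrative sketch of an aligned, non-atomic store (not part of this
  // header); `Val`, `Ptr`, and `InsertPt` are assumed to exist.
  //
  //   StoreInst *SI =
  //       new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(4), InsertPt);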
340
341 // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
344
345 /// Return true if this is a store to a volatile memory location.
346 bool isVolatile() const { return getSubclassData<VolatileField>(); }
347
348 /// Specify whether this is a volatile store or not.
349 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
350
351 /// Transparently provide more efficient getOperand methods.
352 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
353
354 Align getAlign() const {
355 return Align(1ULL << (getSubclassData<AlignmentField>()));
356 }
357
358 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
360 }
361
362 /// Returns the ordering constraint of this store instruction.
363 AtomicOrdering getOrdering() const {
364 return getSubclassData<OrderingField>();
365 }
366
367 /// Sets the ordering constraint of this store instruction. May not be
368 /// Acquire or AcquireRelease.
369 void setOrdering(AtomicOrdering Ordering) {
370 setSubclassData<OrderingField>(Ordering);
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 template <typename Bitfield>
424 void setSubclassData(typename Bitfield::Type Value) {
425 Instruction::setSubclassData<Bitfield>(Value);
426 }
427
428 /// The synchronization scope ID of this store instruction. Not quite enough
429 /// room in SubClassData for everything, so synchronization scope ID gets its
430 /// own field.
431 SyncScope::ID SSID;
432};
433
434template <>
435struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
436};
437
438DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
439
440//===----------------------------------------------------------------------===//
441// FenceInst Class
442//===----------------------------------------------------------------------===//
443
444/// An instruction for ordering other memory operations.
445class FenceInst : public Instruction {
446 using OrderingField = AtomicOrderingBitfieldElementT<0>;
447
448 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
449
450protected:
451 // Note: Instruction needs to be a friend here to call cloneImpl.
452 friend class Instruction;
453
454 FenceInst *cloneImpl() const;
455
456public:
457 // Ordering may only be Acquire, Release, AcquireRelease, or
458 // SequentiallyConsistent.
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
460 SyncScope::ID SSID = SyncScope::System,
461 Instruction *InsertBefore = nullptr);
462 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
463 BasicBlock *InsertAtEnd);
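  // Illustrative sketch (not part of this header): a sequentially consistent
  // fence; `Ctx` and `InsertPt` are assumed to exist.
  //
  //   FenceInst *FI = new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
  //                                 SyncScope::System, InsertPt);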
464
465 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
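  // Illustrative sketch (not part of this header): a strong, seq_cst cmpxchg;
  // `Ptr`, `Expected`, `Desired`, and `InsertPt` are assumed to exist.
  //
  //   auto *CX = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(8),
  //                                    AtomicOrdering::SequentiallyConsistent,
  //                                    AtomicOrdering::SequentiallyConsistent,
  //                                    SyncScope::System, InsertPt);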
547
548 // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
551
552 using VolatileField = BoolBitfieldElementT<0>;
553 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
554 using SuccessOrderingField =
555 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
556 using FailureOrderingField =
557 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
558 using AlignmentField =
559 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
560 static_assert(
561 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
562 FailureOrderingField, AlignmentField>(),
563 "Bitfields must be contiguous");
564
565 /// Return the alignment of the memory that is being allocated by the
566 /// instruction.
567 Align getAlign() const {
568 return Align(1ULL << getSubclassData<AlignmentField>());
569 }
570
571 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
573 }
574
575 /// Return true if this is a cmpxchg from a volatile memory
576 /// location.
577 ///
578 bool isVolatile() const { return getSubclassData<VolatileField>(); }
579
580 /// Specify whether this is a volatile cmpxchg.
581 ///
582 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
583
584 /// Return true if this cmpxchg may spuriously fail.
585 bool isWeak() const { return getSubclassData<WeakField>(); }
586
587 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
588
589 /// Transparently provide more efficient getOperand methods.
590 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
591
592 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
593 return Ordering != AtomicOrdering::NotAtomic &&
594 Ordering != AtomicOrdering::Unordered;
595 }
596
597 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
598 return Ordering != AtomicOrdering::NotAtomic &&
599 Ordering != AtomicOrdering::Unordered &&
600 Ordering != AtomicOrdering::AcquireRelease &&
601 Ordering != AtomicOrdering::Release;
602 }
603
604 /// Returns the success ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getSuccessOrdering() const {
606 return getSubclassData<SuccessOrderingField>();
607 }
608
609 /// Sets the success ordering constraint of this cmpxchg instruction.
610 void setSuccessOrdering(AtomicOrdering Ordering) {
611 assert(isValidSuccessOrdering(Ordering) &&
612 "invalid CmpXchg success ordering");
613 setSubclassData<SuccessOrderingField>(Ordering);
614 }
615
616 /// Returns the failure ordering constraint of this cmpxchg instruction.
617 AtomicOrdering getFailureOrdering() const {
618 return getSubclassData<FailureOrderingField>();
619 }
620
621 /// Sets the failure ordering constraint of this cmpxchg instruction.
622 void setFailureOrdering(AtomicOrdering Ordering) {
623 assert(isValidFailureOrdering(Ordering) &&
624 "invalid CmpXchg failure ordering");
625 setSubclassData<FailureOrderingField>(Ordering);
626 }
627
628 /// Returns a single ordering which is at least as strong as both the
629 /// success and failure orderings for this cmpxchg.
630 AtomicOrdering getMergedOrdering() const {
631 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
632 return AtomicOrdering::SequentiallyConsistent;
633 if (getFailureOrdering() == AtomicOrdering::Acquire) {
634 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
635 return AtomicOrdering::Acquire;
636 if (getSuccessOrdering() == AtomicOrdering::Release)
637 return AtomicOrdering::AcquireRelease;
638 }
639 return getSuccessOrdering();
640 }
641
642 /// Returns the synchronization scope ID of this cmpxchg instruction.
643 SyncScope::ID getSyncScopeID() const {
644 return SSID;
645 }
646
647 /// Sets the synchronization scope ID of this cmpxchg instruction.
648 void setSyncScopeID(SyncScope::ID SSID) {
649 this->SSID = SSID;
650 }
651
652 Value *getPointerOperand() { return getOperand(0); }
653 const Value *getPointerOperand() const { return getOperand(0); }
654 static unsigned getPointerOperandIndex() { return 0U; }
655
656 Value *getCompareOperand() { return getOperand(1); }
657 const Value *getCompareOperand() const { return getOperand(1); }
658
659 Value *getNewValOperand() { return getOperand(2); }
660 const Value *getNewValOperand() const { return getOperand(2); }
661
662 /// Returns the address space of the pointer operand.
663 unsigned getPointerAddressSpace() const {
664 return getPointerOperand()->getType()->getPointerAddressSpace();
665 }
666
667 /// Returns the strongest permitted ordering on failure, given the
668 /// desired ordering on success.
669 ///
670 /// If the comparison in a cmpxchg operation fails, there is no atomic store
671 /// so release semantics cannot be provided. So this function drops explicit
672 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
673 /// operation would remain SequentiallyConsistent.
674 static AtomicOrdering
675 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
676 switch (SuccessOrdering) {
677 default:
678 llvm_unreachable("invalid cmpxchg success ordering");
679 case AtomicOrdering::Release:
680 case AtomicOrdering::Monotonic:
681 return AtomicOrdering::Monotonic;
682 case AtomicOrdering::AcquireRelease:
683 case AtomicOrdering::Acquire:
684 return AtomicOrdering::Acquire;
685 case AtomicOrdering::SequentiallyConsistent:
686 return AtomicOrdering::SequentiallyConsistent;
687 }
688 }
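  // Illustrative sketch (not part of this header): choosing the strongest
  // legal failure ordering for an existing cmpxchg `CX`.
  //
  //   CX->setFailureOrdering(AtomicCmpXchgInst::getStrongestFailureOrdering(
  //       CX->getSuccessOrdering()));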
689
690 // Methods for support type inquiry through isa, cast, and dyn_cast:
691 static bool classof(const Instruction *I) {
692 return I->getOpcode() == Instruction::AtomicCmpXchg;
693 }
694 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
696 }
697
698private:
699 // Shadow Instruction::setInstructionSubclassData with a private forwarding
700 // method so that subclasses cannot accidentally use it.
701 template <typename Bitfield>
702 void setSubclassData(typename Bitfield::Type Value) {
703 Instruction::setSubclassData<Bitfield>(Value);
704 }
705
706 /// The synchronization scope ID of this cmpxchg instruction. Not quite
707 /// enough room in SubClassData for everything, so synchronization scope ID
708 /// gets its own field.
709 SyncScope::ID SSID;
710};
711
712template <>
713struct OperandTraits<AtomicCmpXchgInst> :
714 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
715};
716
717DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
718
719//===----------------------------------------------------------------------===//
720// AtomicRMWInst Class
721//===----------------------------------------------------------------------===//
722
/// An instruction that atomically reads a memory location,
724/// combines it with another value, and then stores the result back. Returns
725/// the old value.
726///
727class AtomicRMWInst : public Instruction {
728protected:
729 // Note: Instruction needs to be a friend here to call cloneImpl.
730 friend class Instruction;
731
732 AtomicRMWInst *cloneImpl() const;
733
734public:
735 /// This enumeration lists the possible modifications atomicrmw can make. In
736 /// the descriptions, 'p' is the pointer to the instruction's memory location,
737 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
738 /// instruction. These instructions always return 'old'.
739 enum BinOp : unsigned {
740 /// *p = v
741 Xchg,
742 /// *p = old + v
743 Add,
744 /// *p = old - v
745 Sub,
746 /// *p = old & v
747 And,
748 /// *p = ~(old & v)
749 Nand,
750 /// *p = old | v
751 Or,
752 /// *p = old ^ v
753 Xor,
754 /// *p = old >signed v ? old : v
755 Max,
756 /// *p = old <signed v ? old : v
757 Min,
758 /// *p = old >unsigned v ? old : v
759 UMax,
760 /// *p = old <unsigned v ? old : v
761 UMin,
762
763 /// *p = old + v
764 FAdd,
765
766 /// *p = old - v
767 FSub,
768
769 /// *p = maxnum(old, v)
770 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
771 FMax,
772
773 /// *p = minnum(old, v)
774 /// \p minnum matches the behavior of \p llvm.minnum.*.
775 FMin,
776
777 /// Increment one up to a maximum value.
778 /// *p = (old u>= v) ? 0 : (old + 1)
779 UIncWrap,
780
781 /// Decrement one until a minimum value or zero.
782 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
783 UDecWrap,
784
785 FIRST_BINOP = Xchg,
786 LAST_BINOP = UDecWrap,
787 BAD_BINOP
788 };
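  // Illustrative sketch (not part of this header): an atomic fetch-and-add;
  // `Ptr`, `Incr`, and `InsertPt` are assumed to exist.
  //
  //   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Incr, Align(4),
  //                                 AtomicOrdering::SequentiallyConsistent,
  //                                 SyncScope::System, InsertPt);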
789
790private:
791 template <unsigned Offset>
792 using AtomicOrderingBitfieldElement =
793 typename Bitfield::Element<AtomicOrdering, Offset, 3,
794 AtomicOrdering::LAST>;
795
796 template <unsigned Offset>
797 using BinOpBitfieldElement =
798 typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
799
800public:
801 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
802 AtomicOrdering Ordering, SyncScope::ID SSID,
803 Instruction *InsertBefore = nullptr);
804 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
805 AtomicOrdering Ordering, SyncScope::ID SSID,
806 BasicBlock *InsertAtEnd);
807
808 // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
811
812 using VolatileField = BoolBitfieldElementT<0>;
813 using AtomicOrderingField =
814 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
815 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
816 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
817 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
818 OperationField, AlignmentField>(),
819 "Bitfields must be contiguous");
820
821 BinOp getOperation() const { return getSubclassData<OperationField>(); }
822
823 static StringRef getOperationName(BinOp Op);
824
825 static bool isFPOperation(BinOp Op) {
826 switch (Op) {
827 case AtomicRMWInst::FAdd:
828 case AtomicRMWInst::FSub:
829 case AtomicRMWInst::FMax:
830 case AtomicRMWInst::FMin:
831 return true;
832 default:
833 return false;
834 }
835 }
836
837 void setOperation(BinOp Operation) {
838 setSubclassData<OperationField>(Operation);
839 }
840
841 /// Return the alignment of the memory that is being allocated by the
842 /// instruction.
843 Align getAlign() const {
844 return Align(1ULL << getSubclassData<AlignmentField>());
845 }
846
847 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
849 }
850
851 /// Return true if this is a RMW on a volatile memory location.
852 ///
853 bool isVolatile() const { return getSubclassData<VolatileField>(); }
854
855 /// Specify whether this is a volatile RMW or not.
856 ///
857 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
858
859 /// Transparently provide more efficient getOperand methods.
860 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
861
862 /// Returns the ordering constraint of this rmw instruction.
863 AtomicOrdering getOrdering() const {
864 return getSubclassData<AtomicOrderingField>();
865 }
866
867 /// Sets the ordering constraint of this rmw instruction.
868 void setOrdering(AtomicOrdering Ordering) {
869 assert(Ordering != AtomicOrdering::NotAtomic &&
870 "atomicrmw instructions can only be atomic.");
871 assert(Ordering != AtomicOrdering::Unordered &&
872 "atomicrmw instructions cannot be unordered.");
873 setSubclassData<AtomicOrderingField>(Ordering);
874 }
875
876 /// Returns the synchronization scope ID of this rmw instruction.
877 SyncScope::ID getSyncScopeID() const {
878 return SSID;
879 }
880
881 /// Sets the synchronization scope ID of this rmw instruction.
882 void setSyncScopeID(SyncScope::ID SSID) {
883 this->SSID = SSID;
884 }
885
886 Value *getPointerOperand() { return getOperand(0); }
887 const Value *getPointerOperand() const { return getOperand(0); }
888 static unsigned getPointerOperandIndex() { return 0U; }
889
890 Value *getValOperand() { return getOperand(1); }
891 const Value *getValOperand() const { return getOperand(1); }
892
893 /// Returns the address space of the pointer operand.
894 unsigned getPointerAddressSpace() const {
895 return getPointerOperand()->getType()->getPointerAddressSpace();
896 }
897
898 bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
900 }
901
902 // Methods for support type inquiry through isa, cast, and dyn_cast:
903 static bool classof(const Instruction *I) {
904 return I->getOpcode() == Instruction::AtomicRMW;
905 }
906 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
908 }
909
910private:
911 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
912 AtomicOrdering Ordering, SyncScope::ID SSID);
913
914 // Shadow Instruction::setInstructionSubclassData with a private forwarding
915 // method so that subclasses cannot accidentally use it.
916 template <typename Bitfield>
917 void setSubclassData(typename Bitfield::Type Value) {
918 Instruction::setSubclassData<Bitfield>(Value);
919 }
920
921 /// The synchronization scope ID of this rmw instruction. Not quite enough
922 /// room in SubClassData for everything, so synchronization scope ID gets its
923 /// own field.
924 SyncScope::ID SSID;
925};
926
927template <>
928struct OperandTraits<AtomicRMWInst>
929 : public FixedNumOperandTraits<AtomicRMWInst,2> {
930};
931
932DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
933
934//===----------------------------------------------------------------------===//
935// GetElementPtrInst Class
936//===----------------------------------------------------------------------===//
937
938// checkGEPType - Simple wrapper function to give a better assertion failure
939// message on bad indexes for a gep instruction.
940//
941inline Type *checkGEPType(Type *Ty) {
942 assert(Ty && "Invalid GetElementPtrInst indices for type!");
943 return Ty;
944}
945
/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs.
948///
949class GetElementPtrInst : public Instruction {
950 Type *SourceElementType;
951 Type *ResultElementType;
952
953 GetElementPtrInst(const GetElementPtrInst &GEPI);
954
  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first ctor can optionally insert before an existing
  /// instruction; the second appends the new instruction to the specified
  /// BasicBlock.
959 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
960 ArrayRef<Value *> IdxList, unsigned Values,
961 const Twine &NameStr, Instruction *InsertBefore);
962 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
963 ArrayRef<Value *> IdxList, unsigned Values,
964 const Twine &NameStr, BasicBlock *InsertAtEnd);
965
966 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
967
968protected:
969 // Note: Instruction needs to be a friend here to call cloneImpl.
970 friend class Instruction;
971
972 GetElementPtrInst *cloneImpl() const;
973
974public:
975 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
976 ArrayRef<Value *> IdxList,
977 const Twine &NameStr = "",
978 Instruction *InsertBefore = nullptr) {
979 unsigned Values = 1 + unsigned(IdxList.size());
980 assert(PointeeType && "Must specify element type");
981 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
982 NameStr, InsertBefore);
983 }
984
985 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
986 ArrayRef<Value *> IdxList,
987 const Twine &NameStr,
988 BasicBlock *InsertAtEnd) {
989 unsigned Values = 1 + unsigned(IdxList.size());
990 assert(PointeeType && "Must specify element type");
991 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
992 NameStr, InsertAtEnd);
993 }
994
995 /// Create an "inbounds" getelementptr. See the documentation for the
996 /// "inbounds" flag in LangRef.html for details.
997 static GetElementPtrInst *
998 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
999 const Twine &NameStr = "",
1000 Instruction *InsertBefore = nullptr) {
1001 GetElementPtrInst *GEP =
1002 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1003 GEP->setIsInBounds(true);
1004 return GEP;
1005 }
1006
1007 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1008 ArrayRef<Value *> IdxList,
1009 const Twine &NameStr,
1010 BasicBlock *InsertAtEnd) {
1011 GetElementPtrInst *GEP =
1012 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1013 GEP->setIsInBounds(true);
1014 return GEP;
1015 }
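  // Illustrative sketch (not part of this header): an inbounds GEP addressing
  // the third field of a struct; `Int64Ty`, `StructTy`, `BasePtr`, and
  // `InsertPt` are assumed to exist.
  //
  //   Value *Idxs[] = {ConstantInt::get(Int64Ty, 0),
  //                    ConstantInt::get(Int64Ty, 2)};
  //   GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds(
  //       StructTy, BasePtr, Idxs, "field.addr", InsertPt);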
1016
1017 /// Transparently provide more efficient getOperand methods.
1018 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1019
1020 Type *getSourceElementType() const { return SourceElementType; }
1021
1022 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1023 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1024
1025 Type *getResultElementType() const {
1026 return ResultElementType;
1027 }
1028
1029 /// Returns the address space of this instruction's pointer type.
1030 unsigned getAddressSpace() const {
1031 // Note that this is always the same as the pointer operand's address space
1032 // and that is cheaper to compute, so cheat here.
1033 return getPointerAddressSpace();
1034 }
1035
1036 /// Returns the result type of a getelementptr with the given source
1037 /// element type and indexes.
1038 ///
1039 /// Null is returned if the indices are invalid for the specified
1040 /// source element type.
1041 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1042 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1043 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1044
1045 /// Return the type of the element at the given index of an indexable
1046 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1047 ///
1048 /// Returns null if the type can't be indexed, or the given index is not
1049 /// legal for the given type.
1050 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1051 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1052
1053 inline op_iterator idx_begin() { return op_begin()+1; }
1054 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1055 inline op_iterator idx_end() { return op_end(); }
1056 inline const_op_iterator idx_end() const { return op_end(); }
1057
1058 inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
1060 }
1061
1062 inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
1064 }
1065
1066 Value *getPointerOperand() {
1067 return getOperand(0);
1068 }
1069 const Value *getPointerOperand() const {
1070 return getOperand(0);
1071 }
1072 static unsigned getPointerOperandIndex() {
1073 return 0U; // get index for modifying correct operand.
1074 }
1075
1076 /// Method to return the pointer operand as a
1077 /// PointerType.
1078 Type *getPointerOperandType() const {
1079 return getPointerOperand()->getType();
1080 }
1081
1082 /// Returns the address space of the pointer operand.
1083 unsigned getPointerAddressSpace() const {
1084 return getPointerOperandType()->getPointerAddressSpace();
1085 }
1086
1087 /// Returns the pointer type returned by the GEP
1088 /// instruction, which may be a vector of pointers.
1089 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1090 // Vector GEP
1091 Type *Ty = Ptr->getType();
1092 if (Ty->isVectorTy())
1093 return Ty;
1094
1095 for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(Ty, EltCount);
1099 }
1100 // Scalar GEP
1101 return Ty;
1102 }
1103
1104 unsigned getNumIndices() const { // Note: always non-negative
1105 return getNumOperands() - 1;
1106 }
1107
1108 bool hasIndices() const {
1109 return getNumOperands() > 1;
1110 }
1111
1112 /// Return true if all of the indices of this GEP are
1113 /// zeros. If so, the result pointer and the first operand have the same
1114 /// value, just potentially different types.
1115 bool hasAllZeroIndices() const;
1116
1117 /// Return true if all of the indices of this GEP are
1118 /// constant integers. If so, the result pointer and the first operand have
1119 /// a constant offset between them.
1120 bool hasAllConstantIndices() const;
1121
1122 /// Set or clear the inbounds flag on this GEP instruction.
1123 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1124 void setIsInBounds(bool b = true);
1125
1126 /// Determine whether the GEP has the inbounds flag.
1127 bool isInBounds() const;
1128
1129 /// Accumulate the constant address offset of this GEP if possible.
1130 ///
1131 /// This routine accepts an APInt into which it will accumulate the constant
1132 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1133 /// all-constant, it returns false and the value of the offset APInt is
1134 /// undefined (it is *not* preserved!). The APInt passed into this routine
1135 /// must be at least as wide as the IntPtr type for the address space of
1136 /// the base GEP pointer.
1137 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1138 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1139 MapVector<Value *, APInt> &VariableOffsets,
1140 APInt &ConstantOffset) const;
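  // Illustrative sketch (not part of this header): accumulating a constant
  // offset; `GEP` and `DL` are assumed to exist. The APInt must be at least as
  // wide as the index size of the pointer's address space.
  //
  //   APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  //   if (GEP->accumulateConstantOffset(DL, Offset))
  //     ; // Offset now holds the constant byte offset of the GEP.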
1141 // Methods for support type inquiry through isa, cast, and dyn_cast:
1142 static bool classof(const Instruction *I) {
1143 return (I->getOpcode() == Instruction::GetElementPtr);
1144 }
1145 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1147 }
1148};
1149
1150template <>
1151struct OperandTraits<GetElementPtrInst> :
1152 public VariadicOperandTraits<GetElementPtrInst, 1> {
1153};
1154
1155GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1156 ArrayRef<Value *> IdxList, unsigned Values,
1157 const Twine &NameStr,
1158 Instruction *InsertBefore)
1159 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1161 Values, InsertBefore),
1162 SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
1164 init(Ptr, IdxList, NameStr);
1165}
1166
1167GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1168 ArrayRef<Value *> IdxList, unsigned Values,
1169 const Twine &NameStr,
1170 BasicBlock *InsertAtEnd)
1171 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1173 Values, InsertAtEnd),
1174 SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
1176 init(Ptr, IdxList, NameStr);
1177}
1178
1179DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1180
1181//===----------------------------------------------------------------------===//
1182// ICmpInst Class
1183//===----------------------------------------------------------------------===//
1184
1185/// This instruction compares its operands according to the predicate given
1186/// to the constructor. It only operates on integers or pointers. The operands
1187/// must be identical types.
1188/// Represent an integer comparison operator.
1189class ICmpInst: public CmpInst {
1190 void AssertOK() {
1191 assert(isIntPredicate() &&
1192 "Invalid ICmp predicate value");
1193 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1194 "Both operands to ICmp instruction are not of the same type!");
1195 // Check that the operands are the right type
1196 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1197 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1198 "Invalid operand types for ICmp instruction");
1199 }
1200
1201protected:
1202 // Note: Instruction needs to be a friend here to call cloneImpl.
1203 friend class Instruction;
1204
1205 /// Clone an identical ICmpInst
1206 ICmpInst *cloneImpl() const;
1207
1208public:
1209 /// Constructor with insert-before-instruction semantics.
1210 ICmpInst(
1211 Instruction *InsertBefore, ///< Where to insert
1212 Predicate pred, ///< The predicate to use for the comparison
1213 Value *LHS, ///< The left-hand-side of the expression
1214 Value *RHS, ///< The right-hand-side of the expression
1215 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1217 Instruction::ICmp, pred, LHS, RHS, NameStr,
1218 InsertBefore) {
1219#ifndef NDEBUG
1220 AssertOK();
1221#endif
1222 }
1223
1224 /// Constructor with insert-at-end semantics.
1225 ICmpInst(
1226 BasicBlock &InsertAtEnd, ///< Block to insert into.
1227 Predicate pred, ///< The predicate to use for the comparison
1228 Value *LHS, ///< The left-hand-side of the expression
1229 Value *RHS, ///< The right-hand-side of the expression
1230 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1232 Instruction::ICmp, pred, LHS, RHS, NameStr,
1233 &InsertAtEnd) {
1234#ifndef NDEBUG
1235 AssertOK();
1236#endif
1237 }
1238
1239 /// Constructor with no-insertion semantics
1240 ICmpInst(
1241 Predicate pred, ///< The predicate to use for the comparison
1242 Value *LHS, ///< The left-hand-side of the expression
1243 Value *RHS, ///< The right-hand-side of the expression
1244 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1246 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1247#ifndef NDEBUG
1248 AssertOK();
1249#endif
1250 }
1251
1252 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1253 /// @returns the predicate that would be the result if the operand were
1254 /// regarded as signed.
1255 /// Return the signed version of the predicate
1256 Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
1258 }
1259
1260 /// This is a static version that you can use without an instruction.
1261 /// Return the signed version of the predicate.
1262 static Predicate getSignedPredicate(Predicate pred);
1263
1264 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1265 /// @returns the predicate that would be the result if the operand were
1266 /// regarded as unsigned.
1267 /// Return the unsigned version of the predicate
1268 Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
1270 }
1271
1272 /// This is a static version that you can use without an instruction.
1273 /// Return the unsigned version of the predicate.
1274 static Predicate getUnsignedPredicate(Predicate pred);
1275
1276 /// Return true if this predicate is either EQ or NE. This also
1277 /// tests for commutativity.
1278 static bool isEquality(Predicate P) {
1279 return P == ICMP_EQ || P == ICMP_NE;
1280 }
1281
1282 /// Return true if this predicate is either EQ or NE. This also
1283 /// tests for commutativity.
1284 bool isEquality() const {
    return isEquality(getPredicate());
1286 }
1287
1288 /// @returns true if the predicate of this ICmpInst is commutative
1289 /// Determine if this relation is commutative.
1290 bool isCommutative() const { return isEquality(); }
1291
1292 /// Return true if the predicate is relational (not EQ or NE).
1293 ///
1294 bool isRelational() const {
1295 return !isEquality();
1296 }
1297
1298 /// Return true if the predicate is relational (not EQ or NE).
1299 ///
1300 static bool isRelational(Predicate P) {
1301 return !isEquality(P);
1302 }
1303
1304 /// Return true if the predicate is SGT or UGT.
1305 ///
1306 static bool isGT(Predicate P) {
1307 return P == ICMP_SGT || P == ICMP_UGT;
1308 }
1309
1310 /// Return true if the predicate is SLT or ULT.
1311 ///
1312 static bool isLT(Predicate P) {
1313 return P == ICMP_SLT || P == ICMP_ULT;
1314 }
1315
1316 /// Return true if the predicate is SGE or UGE.
1317 ///
1318 static bool isGE(Predicate P) {
1319 return P == ICMP_SGE || P == ICMP_UGE;
1320 }
1321
1322 /// Return true if the predicate is SLE or ULE.
1323 ///
1324 static bool isLE(Predicate P) {
1325 return P == ICMP_SLE || P == ICMP_ULE;
1326 }
1327
1328 /// Returns the sequence of all ICmp predicates.
1329 ///
1330 static auto predicates() { return ICmpPredicates(); }
1331
1332 /// Exchange the two operands to this instruction in such a way that it does
1333 /// not modify the semantics of the instruction. The predicate value may be
1334 /// changed to retain the same result if the predicate is order dependent
1335 /// (e.g. ult).
1336 /// Swap operands and adjust predicate.
1337 void swapOperands() {
1338 setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
1340 }
1341
1342 /// Return result of `LHS Pred RHS` comparison.
1343 static bool compare(const APInt &LHS, const APInt &RHS,
1344 ICmpInst::Predicate Pred);
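  // Illustrative sketch (not part of this header): evaluating a predicate on
  // constant integers.
  //
  //   bool IsLess = ICmpInst::compare(APInt(32, 5), APInt(32, 7),
  //                                   ICmpInst::ICMP_SLT); // true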
1345
1346 // Methods for support type inquiry through isa, cast, and dyn_cast:
1347 static bool classof(const Instruction *I) {
1348 return I->getOpcode() == Instruction::ICmp;
1349 }
1350 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1352 }
1353};
1354
1355//===----------------------------------------------------------------------===//
1356// FCmpInst Class
1357//===----------------------------------------------------------------------===//
1358
1359/// This instruction compares its operands according to the predicate given
1360/// to the constructor. It only operates on floating point values or packed
1361/// vectors of floating point values. The operands must be identical types.
1362/// Represents a floating point comparison operator.
1363class FCmpInst: public CmpInst {
1364 void AssertOK() {
1365 assert(isFPPredicate() && "Invalid FCmp predicate value");
1366 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1367 "Both operands to FCmp instruction are not of the same type!");
1368 // Check that the operands are the right type
1369 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1370 "Invalid operand types for FCmp instruction");
1371 }
1372
1373protected:
1374 // Note: Instruction needs to be a friend here to call cloneImpl.
1375 friend class Instruction;
1376
1377 /// Clone an identical FCmpInst
1378 FCmpInst *cloneImpl() const;
1379
1380public:
1381 /// Constructor with insert-before-instruction semantics.
1382 FCmpInst(
1383 Instruction *InsertBefore, ///< Where to insert
1384 Predicate pred, ///< The predicate to use for the comparison
1385 Value *LHS, ///< The left-hand-side of the expression
1386 Value *RHS, ///< The right-hand-side of the expression
1387 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1389 Instruction::FCmp, pred, LHS, RHS, NameStr,
1390 InsertBefore) {
1391 AssertOK();
1392 }
1393
1394 /// Constructor with insert-at-end semantics.
1395 FCmpInst(
1396 BasicBlock &InsertAtEnd, ///< Block to insert into.
1397 Predicate pred, ///< The predicate to use for the comparison
1398 Value *LHS, ///< The left-hand-side of the expression
1399 Value *RHS, ///< The right-hand-side of the expression
1400 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1402 Instruction::FCmp, pred, LHS, RHS, NameStr,
1403 &InsertAtEnd) {
1404 AssertOK();
1405 }
1406
1407 /// Constructor with no-insertion semantics
1408 FCmpInst(
1409 Predicate Pred, ///< The predicate to use for the comparison
1410 Value *LHS, ///< The left-hand-side of the expression
1411 Value *RHS, ///< The right-hand-side of the expression
1412 const Twine &NameStr = "", ///< Name of the instruction
1413 Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1415 RHS, NameStr, nullptr, FlagsSource) {
1416 AssertOK();
1417 }
1418
1419 /// @returns true if the predicate of this instruction is EQ or NE.
1420 /// Determine if this is an equality predicate.
1421 static bool isEquality(Predicate Pred) {
1422 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1423 Pred == FCMP_UNE;
1424 }
1425
1426 /// @returns true if the predicate of this instruction is EQ or NE.
1427 /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }
1429
1430 /// @returns true if the predicate of this instruction is commutative.
1431 /// Determine if this is a commutative predicate.
1432 bool isCommutative() const {
1433 return isEquality() ||
1434 getPredicate() == FCMP_FALSE ||
1435 getPredicate() == FCMP_TRUE ||
1436 getPredicate() == FCMP_ORD ||
1437 getPredicate() == FCMP_UNO;
1438 }
1439
1440 /// @returns true if the predicate is relational (not EQ or NE).
1441 /// Determine if this a relational predicate.
1442 bool isRelational() const { return !isEquality(); }
1443
1444 /// Exchange the two operands to this instruction in such a way that it does
1445 /// not modify the semantics of the instruction. The predicate value may be
1446 /// changed to retain the same result if the predicate is order dependent
1447 /// (e.g. ult).
1448 /// Swap operands and adjust predicate.
1449 void swapOperands() {
1450 setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
1452 }
1453
1454 /// Returns the sequence of all FCmp predicates.
1455 ///
1456 static auto predicates() { return FCmpPredicates(); }
1457
1458 /// Return result of `LHS Pred RHS` comparison.
1459 static bool compare(const APFloat &LHS, const APFloat &RHS,
1460 FCmpInst::Predicate Pred);
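  // Illustrative sketch (not part of this header): evaluating a predicate on
  // constant floats.
  //
  //   bool Less = FCmpInst::compare(APFloat(1.0), APFloat(2.0),
  //                                 FCmpInst::FCMP_OLT); // true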
1461
1462 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1463 static bool classof(const Instruction *I) {
1464 return I->getOpcode() == Instruction::FCmp;
1465 }
1466 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1468 }
1469};
1470
1471//===----------------------------------------------------------------------===//
1472/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses the low bit of the SubClassData
1474/// field to indicate whether or not this is a tail call. The rest of the bits
1475/// hold the calling convention of the call.
1476///
1477class CallInst : public CallBase {
1478 CallInst(const CallInst &CI);
1479
  /// Construct a CallInst given a range of arguments.
1482 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1483 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1484 Instruction *InsertBefore);
1485
1486 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1487 const Twine &NameStr, Instruction *InsertBefore)
1488 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1489
  /// Construct a CallInst given a range of arguments.
1492 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1493 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1494 BasicBlock *InsertAtEnd);
1495
1496 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1497 Instruction *InsertBefore);
1498
1499 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1500 BasicBlock *InsertAtEnd);
1501
1502 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1503 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1504 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1505
1506 /// Compute the number of operands to allocate.
1507 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1508 // We need one operand for the called function, plus the input operand
1509 // counts provided.
1510 return 1 + NumArgs + NumBundleInputs;
1511 }
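  // For example (illustrative): a call with two arguments and one operand
  // bundle carrying a single input allocates 1 + 2 + 1 = 4 operands: the
  // callee, both arguments, and the bundle input.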
1512
1513protected:
1514 // Note: Instruction needs to be a friend here to call cloneImpl.
1515 friend class Instruction;
1516
1517 CallInst *cloneImpl() const;
1518
1519public:
1520 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1521 Instruction *InsertBefore = nullptr) {
1522 return new (ComputeNumOperands(NumArgs: 0)) CallInst(Ty, F, NameStr, InsertBefore);
1523 }
1524
1525 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1526 const Twine &NameStr,
1527 Instruction *InsertBefore = nullptr) {
1528 return new (ComputeNumOperands(NumArgs: Args.size()))
1529 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1530 }
1531
1532 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1533 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1534 const Twine &NameStr = "",
1535 Instruction *InsertBefore = nullptr) {
1536 const int NumOperands =
1537 ComputeNumOperands(NumArgs: Args.size(), NumBundleInputs: CountBundleInputs(Bundles));
1538 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1539
1540 return new (NumOperands, DescriptorBytes)
1541 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1542 }
1543
1544 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1545 BasicBlock *InsertAtEnd) {
1546 return new (ComputeNumOperands(NumArgs: 0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1547 }
1548
1549 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1550 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1551 return new (ComputeNumOperands(NumArgs: Args.size()))
1552 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1553 }
1554
1555 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1556 ArrayRef<OperandBundleDef> Bundles,
1557 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1558 const int NumOperands =
1559 ComputeNumOperands(NumArgs: Args.size(), NumBundleInputs: CountBundleInputs(Bundles));
1560 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1561
1562 return new (NumOperands, DescriptorBytes)
1563 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1564 }
1565
1566 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1567 Instruction *InsertBefore = nullptr) {
1568 return Create(Ty: Func.getFunctionType(), F: Func.getCallee(), NameStr,
1569 InsertBefore);
1570 }
1571
1572 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1573 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1574 const Twine &NameStr = "",
1575 Instruction *InsertBefore = nullptr) {
1576 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, Bundles,
1577 NameStr, InsertBefore);
1578 }
1579
1580 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1581 const Twine &NameStr,
1582 Instruction *InsertBefore = nullptr) {
1583 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, NameStr,
1584 InsertBefore);
1585 }
1586
1587 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1588 BasicBlock *InsertAtEnd) {
1589 return Create(Ty: Func.getFunctionType(), F: Func.getCallee(), NameStr,
1590 InsertAtEnd);
1591 }
1592
1593 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1594 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1595 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, NameStr,
1596 InsertAtEnd);
1597 }
1598
1599 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1600 ArrayRef<OperandBundleDef> Bundles,
1601 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1602 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, Bundles,
1603 NameStr, InsertAtEnd);
1604 }
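  // Minimal usage sketch (hypothetical names: 'M', 'Ctx', 'Str', and 'InsertPt'
  // are assumed to exist in the surrounding code):
  //   FunctionCallee Callee = M.getOrInsertFunction(
  //       "puts", Type::getInt32Ty(Ctx),
  //       PointerType::getUnqual(Type::getInt8Ty(Ctx)));
  //   CallInst *Call = CallInst::Create(Callee, {Str}, "calltmp", InsertPt);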
1605
1606 /// Create a clone of \p CI with a different set of operand bundles and
1607 /// insert it before \p InsertPt.
1608 ///
1609 /// The returned call instruction is identical to \p CI in every way except that
1610 /// the operand bundles for the new instruction are set to the operand bundles
1611 /// in \p Bundles.
1612 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1613 Instruction *InsertPt = nullptr);
1614
1615 // Note that 'musttail' implies 'tail'.
1616 enum TailCallKind : unsigned {
1617 TCK_None = 0,
1618 TCK_Tail = 1,
1619 TCK_MustTail = 2,
1620 TCK_NoTail = 3,
1621 TCK_LAST = TCK_NoTail
1622 };
1623
1624 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1625 static_assert(
1626 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1627 "Bitfields must be contiguous");
1628
1629 TailCallKind getTailCallKind() const {
1630 return getSubclassData<TailCallKindField>();
1631 }
1632
1633 bool isTailCall() const {
1634 TailCallKind Kind = getTailCallKind();
1635 return Kind == TCK_Tail || Kind == TCK_MustTail;
1636 }
1637
1638 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1639
1640 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1641
1642 void setTailCallKind(TailCallKind TCK) {
1643 setSubclassData<TailCallKindField>(TCK);
1644 }
1645
1646 void setTailCall(bool IsTc = true) {
1647 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1648 }
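  // Illustrative sketch (names assumed): marking a newly created call as a
  // tail call. Note that TCK_MustTail also makes isTailCall() return true.
  //   CallInst *Call = CallInst::Create(FTy, Callee, {Arg}, "", InsertPt);
  //   Call->setTailCallKind(CallInst::TCK_Tail);
  //   assert(Call->isTailCall());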
1649
1650 /// Return true if the call can return twice.
1651 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1652 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1653
1654 // Methods for support type inquiry through isa, cast, and dyn_cast:
1655 static bool classof(const Instruction *I) {
1656 return I->getOpcode() == Instruction::Call;
1657 }
1658 static bool classof(const Value *V) {
1659 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1660 }
1661
1662 /// Updates profile metadata by scaling it by \p S / \p T.
1663 void updateProfWeight(uint64_t S, uint64_t T);
1664
1665private:
1666 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1667 // method so that subclasses cannot accidentally use it.
1668 template <typename Bitfield>
1669 void setSubclassData(typename Bitfield::Type Value) {
1670 Instruction::setSubclassData<Bitfield>(Value);
1671 }
1672};
1673
1674CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1675 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1676 BasicBlock *InsertAtEnd)
1677 : CallBase(Ty->getReturnType(), Instruction::Call,
1678 OperandTraits<CallBase>::op_end(U: this) -
1679 (Args.size() + CountBundleInputs(Bundles) + 1),
1680 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1681 InsertAtEnd) {
1682 init(FTy: Ty, Func, Args, Bundles, NameStr);
1683}
1684
1685CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1686 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1687 Instruction *InsertBefore)
1688 : CallBase(Ty->getReturnType(), Instruction::Call,
1689 OperandTraits<CallBase>::op_end(U: this) -
1690 (Args.size() + CountBundleInputs(Bundles) + 1),
1691 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1692 InsertBefore) {
1693 init(FTy: Ty, Func, Args, Bundles, NameStr);
1694}
1695
1696//===----------------------------------------------------------------------===//
1697// SelectInst Class
1698//===----------------------------------------------------------------------===//
1699
1700/// This class represents the LLVM 'select' instruction.
1701///
1702class SelectInst : public Instruction {
1703 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1704 Instruction *InsertBefore)
1705 : Instruction(S1->getType(), Instruction::Select,
1706 &Op<0>(), 3, InsertBefore) {
1707 init(C, S1, S2);
1708 setName(NameStr);
1709 }
1710
1711 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1712 BasicBlock *InsertAtEnd)
1713 : Instruction(S1->getType(), Instruction::Select,
1714 &Op<0>(), 3, InsertAtEnd) {
1715 init(C, S1, S2);
1716 setName(NameStr);
1717 }
1718
1719 void init(Value *C, Value *S1, Value *S2) {
1720 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1721 Op<0>() = C;
1722 Op<1>() = S1;
1723 Op<2>() = S2;
1724 }
1725
1726protected:
1727 // Note: Instruction needs to be a friend here to call cloneImpl.
1728 friend class Instruction;
1729
1730 SelectInst *cloneImpl() const;
1731
1732public:
1733 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1734 const Twine &NameStr = "",
1735 Instruction *InsertBefore = nullptr,
1736 Instruction *MDFrom = nullptr) {
1737 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1738 if (MDFrom)
1739 Sel->copyMetadata(SrcInst: *MDFrom);
1740 return Sel;
1741 }
1742
1743 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1744 const Twine &NameStr,
1745 BasicBlock *InsertAtEnd) {
1746 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1747 }
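  // Minimal usage sketch (operand names assumed): select between two values of
  // the same type based on an i1 (or vector of i1) condition.
  //   Value *Max = SelectInst::Create(Cond, A, B, "max", InsertPt);
  // Invalid operand combinations are diagnosed by areInvalidOperands() below.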
1748
1749 const Value *getCondition() const { return Op<0>(); }
1750 const Value *getTrueValue() const { return Op<1>(); }
1751 const Value *getFalseValue() const { return Op<2>(); }
1752 Value *getCondition() { return Op<0>(); }
1753 Value *getTrueValue() { return Op<1>(); }
1754 Value *getFalseValue() { return Op<2>(); }
1755
1756 void setCondition(Value *V) { Op<0>() = V; }
1757 void setTrueValue(Value *V) { Op<1>() = V; }
1758 void setFalseValue(Value *V) { Op<2>() = V; }
1759
1760 /// Swap the true and false values of the select instruction.
1761 /// This doesn't swap prof metadata.
1762 void swapValues() { Op<1>().swap(RHS&: Op<2>()); }
1763
1764 /// Return a string if the specified operands are invalid
1765 /// for a select operation, otherwise return null.
1766 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1767
1768 /// Transparently provide more efficient getOperand methods.
1769 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1770
1771 OtherOps getOpcode() const {
1772 return static_cast<OtherOps>(Instruction::getOpcode());
1773 }
1774
1775 // Methods for support type inquiry through isa, cast, and dyn_cast:
1776 static bool classof(const Instruction *I) {
1777 return I->getOpcode() == Instruction::Select;
1778 }
1779 static bool classof(const Value *V) {
1780 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1781 }
1782};
1783
1784template <>
1785struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1786};
1787
1788DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1789
1790//===----------------------------------------------------------------------===//
1791// VAArgInst Class
1792//===----------------------------------------------------------------------===//
1793
1794 /// This class represents the LLVM va_arg instruction, which returns
1795 /// an argument of the specified type given a va_list and increments that list.
1796///
1797class VAArgInst : public UnaryInstruction {
1798protected:
1799 // Note: Instruction needs to be a friend here to call cloneImpl.
1800 friend class Instruction;
1801
1802 VAArgInst *cloneImpl() const;
1803
1804public:
1805 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1806 Instruction *InsertBefore = nullptr)
1807 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1808 setName(NameStr);
1809 }
1810
1811 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1812 BasicBlock *InsertAtEnd)
1813 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1814 setName(NameStr);
1815 }
1816
1817 Value *getPointerOperand() { return getOperand(i_nocapture: 0); }
1818 const Value *getPointerOperand() const { return getOperand(i_nocapture: 0); }
1819 static unsigned getPointerOperandIndex() { return 0U; }
1820
1821 // Methods for support type inquiry through isa, cast, and dyn_cast:
1822 static bool classof(const Instruction *I) {
1823 return I->getOpcode() == VAArg;
1824 }
1825 static bool classof(const Value *V) {
1826 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1827 }
1828};
1829
1830//===----------------------------------------------------------------------===//
1831// ExtractElementInst Class
1832//===----------------------------------------------------------------------===//
1833
1834/// This instruction extracts a single (scalar)
1835 /// element from a VectorType value.
1836///
1837class ExtractElementInst : public Instruction {
1838 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1839 Instruction *InsertBefore = nullptr);
1840 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1841 BasicBlock *InsertAtEnd);
1842
1843protected:
1844 // Note: Instruction needs to be a friend here to call cloneImpl.
1845 friend class Instruction;
1846
1847 ExtractElementInst *cloneImpl() const;
1848
1849public:
1850 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1851 const Twine &NameStr = "",
1852 Instruction *InsertBefore = nullptr) {
1853 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1854 }
1855
1856 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1857 const Twine &NameStr,
1858 BasicBlock *InsertAtEnd) {
1859 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1860 }
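  // Illustrative sketch (names assumed): extract lane 0 of a vector value.
  //   Value *Lane0 = ExtractElementInst::Create(
  //       Vec, ConstantInt::get(Type::getInt64Ty(Ctx), 0), "lane0", InsertPt);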
1861
1862 /// Return true if an extractelement instruction can be
1863 /// formed with the specified operands.
1864 static bool isValidOperands(const Value *Vec, const Value *Idx);
1865
1866 Value *getVectorOperand() { return Op<0>(); }
1867 Value *getIndexOperand() { return Op<1>(); }
1868 const Value *getVectorOperand() const { return Op<0>(); }
1869 const Value *getIndexOperand() const { return Op<1>(); }
1870
1871 VectorType *getVectorOperandType() const {
1872 return cast<VectorType>(Val: getVectorOperand()->getType());
1873 }
1874
1875 /// Transparently provide more efficient getOperand methods.
1876 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1877
1878 // Methods for support type inquiry through isa, cast, and dyn_cast:
1879 static bool classof(const Instruction *I) {
1880 return I->getOpcode() == Instruction::ExtractElement;
1881 }
1882 static bool classof(const Value *V) {
1883 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1884 }
1885};
1886
1887template <>
1888struct OperandTraits<ExtractElementInst> :
1889 public FixedNumOperandTraits<ExtractElementInst, 2> {
1890};
1891
1892DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1893
1894//===----------------------------------------------------------------------===//
1895// InsertElementInst Class
1896//===----------------------------------------------------------------------===//
1897
1898/// This instruction inserts a single (scalar)
1899 /// element into a VectorType value.
1900///
1901class InsertElementInst : public Instruction {
1902 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1903 const Twine &NameStr = "",
1904 Instruction *InsertBefore = nullptr);
1905 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1906 BasicBlock *InsertAtEnd);
1907
1908protected:
1909 // Note: Instruction needs to be a friend here to call cloneImpl.
1910 friend class Instruction;
1911
1912 InsertElementInst *cloneImpl() const;
1913
1914public:
1915 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1916 const Twine &NameStr = "",
1917 Instruction *InsertBefore = nullptr) {
1918 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1919 }
1920
1921 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1922 const Twine &NameStr,
1923 BasicBlock *InsertAtEnd) {
1924 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1925 }
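  // Illustrative sketch (names assumed): build a two-element vector lane by
  // lane, starting from a poison value of the desired vector type.
  //   Value *V = PoisonValue::get(FixedVectorType::get(EltTy, 2));
  //   V = InsertElementInst::Create(V, X, ConstantInt::get(I32Ty, 0), "", IP);
  //   V = InsertElementInst::Create(V, Y, ConstantInt::get(I32Ty, 1), "", IP);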
1926
1927 /// Return true if an insertelement instruction can be
1928 /// formed with the specified operands.
1929 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1930 const Value *Idx);
1931
1932 /// Overload to return most specific vector type.
1933 ///
1934 VectorType *getType() const {
1935 return cast<VectorType>(Val: Instruction::getType());
1936 }
1937
1938 /// Transparently provide more efficient getOperand methods.
1939 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1940
1941 // Methods for support type inquiry through isa, cast, and dyn_cast:
1942 static bool classof(const Instruction *I) {
1943 return I->getOpcode() == Instruction::InsertElement;
1944 }
1945 static bool classof(const Value *V) {
1946 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1947 }
1948};
1949
1950template <>
1951struct OperandTraits<InsertElementInst> :
1952 public FixedNumOperandTraits<InsertElementInst, 3> {
1953};
1954
1955DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1956
1957//===----------------------------------------------------------------------===//
1958// ShuffleVectorInst Class
1959//===----------------------------------------------------------------------===//
1960
1961constexpr int PoisonMaskElem = -1;
1962
1963/// This instruction constructs a fixed permutation of two
1964/// input vectors.
1965///
1966/// For each element of the result vector, the shuffle mask selects an element
1967/// from one of the input vectors to copy to the result. Non-negative elements
1968/// in the mask represent an index into the concatenated pair of input vectors.
1969/// PoisonMaskElem (-1) specifies that the result element is poison.
1970///
1971/// For scalable vectors, all the elements of the mask must be 0 or -1. This
1972/// requirement may be relaxed in the future.
1973class ShuffleVectorInst : public Instruction {
1974 SmallVector<int, 4> ShuffleMask;
1975 Constant *ShuffleMaskForBitcode;
1976
1977protected:
1978 // Note: Instruction needs to be a friend here to call cloneImpl.
1979 friend class Instruction;
1980
1981 ShuffleVectorInst *cloneImpl() const;
1982
1983public:
1984 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
1985 Instruction *InsertBefore = nullptr);
1986 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
1987 BasicBlock *InsertAtEnd);
1988 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
1989 Instruction *InsertBefore = nullptr);
1990 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
1991 BasicBlock *InsertAtEnd);
1992 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1993 const Twine &NameStr = "",
1994 Instruction *InsertBefore = nullptr);
1995 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1996 const Twine &NameStr, BasicBlock *InsertAtEnd);
1997 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
1998 const Twine &NameStr = "",
1999 Instruction *InsertBefore = nullptr);
2000 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2001 const Twine &NameStr, BasicBlock *InsertAtEnd);
2002
2003 void *operator new(size_t S) { return User::operator new(Size: S, Us: 2); }
2004 void operator delete(void *Ptr) { return User::operator delete(Usr: Ptr); }
2005
2006 /// Swap the operands and adjust the mask to preserve the semantics
2007 /// of the instruction.
2008 void commute();
2009
2010 /// Return true if a shufflevector instruction can be
2011 /// formed with the specified operands.
2012 static bool isValidOperands(const Value *V1, const Value *V2,
2013 const Value *Mask);
2014 static bool isValidOperands(const Value *V1, const Value *V2,
2015 ArrayRef<int> Mask);
2016
2017 /// Overload to return most specific vector type.
2018 ///
2019 VectorType *getType() const {
2020 return cast<VectorType>(Val: Instruction::getType());
2021 }
2022
2023 /// Transparently provide more efficient getOperand methods.
2024 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2025
2026 /// Return the shuffle mask value of this instruction for the given element
2027 /// index. Return PoisonMaskElem if the element is undef.
2028 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2029
2030 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2031 /// elements of the mask are returned as PoisonMaskElem.
2032 static void getShuffleMask(const Constant *Mask,
2033 SmallVectorImpl<int> &Result);
2034
2035 /// Return the mask for this instruction as a vector of integers. Undefined
2036 /// elements of the mask are returned as PoisonMaskElem.
2037 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2038 Result.assign(in_start: ShuffleMask.begin(), in_end: ShuffleMask.end());
2039 }
2040
2041 /// Return the mask for this instruction, for use in bitcode.
2042 ///
2043 /// TODO: This is temporary until we decide a new bitcode encoding for
2044 /// shufflevector.
2045 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2046
2047 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2048 Type *ResultTy);
2049
2050 void setShuffleMask(ArrayRef<int> Mask);
2051
2052 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2053
2054 /// Return true if this shuffle returns a vector with a different number of
2055 /// elements than its source vectors.
2056 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2057 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2058 bool changesLength() const {
2059 unsigned NumSourceElts = cast<VectorType>(Val: Op<0>()->getType())
2060 ->getElementCount()
2061 .getKnownMinValue();
2062 unsigned NumMaskElts = ShuffleMask.size();
2063 return NumSourceElts != NumMaskElts;
2064 }
2065
2066 /// Return true if this shuffle returns a vector with a greater number of
2067 /// elements than its source vectors.
2068 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2069 bool increasesLength() const {
2070 unsigned NumSourceElts = cast<VectorType>(Val: Op<0>()->getType())
2071 ->getElementCount()
2072 .getKnownMinValue();
2073 unsigned NumMaskElts = ShuffleMask.size();
2074 return NumSourceElts < NumMaskElts;
2075 }
2076
2077 /// Return true if this shuffle mask chooses elements from exactly one source
2078 /// vector.
2079 /// Example: <7,5,undef,7>
2080 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2081 /// length as the mask.
2082 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2083 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2084 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2085 SmallVector<int, 16> MaskAsInts;
2086 getShuffleMask(Mask, Result&: MaskAsInts);
2087 return isSingleSourceMask(Mask: MaskAsInts, NumSrcElts);
2088 }
2089
2090 /// Return true if this shuffle chooses elements from exactly one source
2091 /// vector without changing the length of that vector.
2092 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2093 /// TODO: Optionally allow length-changing shuffles.
2094 bool isSingleSource() const {
2095 return !changesLength() &&
2096 isSingleSourceMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2097 }
2098
2099 /// Return true if this shuffle mask chooses elements from exactly one source
2100 /// vector without lane crossings. A shuffle using this mask is not
2101 /// necessarily a no-op because it may change the number of elements from its
2102 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2103 /// Example: <undef,undef,2,3>
2104 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2105 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2106 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2107
2108 // Not possible to express a shuffle mask for a scalable vector for this
2109 // case.
2110 if (isa<ScalableVectorType>(Val: Mask->getType()))
2111 return false;
2112
2113 SmallVector<int, 16> MaskAsInts;
2114 getShuffleMask(Mask, Result&: MaskAsInts);
2115 return isIdentityMask(Mask: MaskAsInts, NumSrcElts);
2116 }
2117
2118 /// Return true if this shuffle chooses elements from exactly one source
2119 /// vector without lane crossings and does not change the number of elements
2120 /// from its input vectors.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2122 bool isIdentity() const {
2123 // Not possible to express a shuffle mask for a scalable vector for this
2124 // case.
2125 if (isa<ScalableVectorType>(Val: getType()))
2126 return false;
2127
2128 return !changesLength() && isIdentityMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2129 }
2130
2131 /// Return true if this shuffle lengthens exactly one source vector with
2132 /// undefs in the high elements.
2133 bool isIdentityWithPadding() const;
2134
2135 /// Return true if this shuffle extracts the first N elements of exactly one
2136 /// source vector.
2137 bool isIdentityWithExtract() const;
2138
2139 /// Return true if this shuffle concatenates its 2 source vectors. This
2140 /// returns false if either input is undefined. In that case, the shuffle is
2141 /// better classified as an identity with padding operation.
2142 bool isConcat() const;
2143
2144 /// Return true if this shuffle mask chooses elements from its source vectors
2145 /// without lane crossings. A shuffle using this mask would be
2146 /// equivalent to a vector select with a constant condition operand.
2147 /// Example: <4,1,6,undef>
2148 /// This returns false if the mask does not choose from both input vectors.
2149 /// In that case, the shuffle is better classified as an identity shuffle.
2150 /// This assumes that vector operands are the same length as the mask
2151 /// (a length-changing shuffle can never be equivalent to a vector select).
2152 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2153 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2154 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2155 SmallVector<int, 16> MaskAsInts;
2156 getShuffleMask(Mask, Result&: MaskAsInts);
2157 return isSelectMask(Mask: MaskAsInts, NumSrcElts);
2158 }
2159
2160 /// Return true if this shuffle chooses elements from its source vectors
2161 /// without lane crossings and all operands have the same number of elements.
2162 /// In other words, this shuffle is equivalent to a vector select with a
2163 /// constant condition operand.
2164 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// TODO: Optionally allow length-changing shuffles.
2168 bool isSelect() const {
2169 return !changesLength() && isSelectMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2170 }
2171
2172 /// Return true if this shuffle mask swaps the order of elements from exactly
2173 /// one source vector.
2174 /// Example: <7,6,undef,4>
2175 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2176 /// length as the mask.
2177 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2178 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2179 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2180 SmallVector<int, 16> MaskAsInts;
2181 getShuffleMask(Mask, Result&: MaskAsInts);
2182 return isReverseMask(Mask: MaskAsInts, NumSrcElts);
2183 }
2184
2185 /// Return true if this shuffle swaps the order of elements from exactly
2186 /// one source vector.
2187 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2188 /// TODO: Optionally allow length-changing shuffles.
2189 bool isReverse() const {
2190 return !changesLength() && isReverseMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2191 }
2192
2193 /// Return true if this shuffle mask chooses all elements with the same value
2194 /// as the first element of exactly one source vector.
2195 /// Example: <4,undef,undef,4>
2196 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2197 /// length as the mask.
2198 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2199 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2200 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2201 SmallVector<int, 16> MaskAsInts;
2202 getShuffleMask(Mask, Result&: MaskAsInts);
2203 return isZeroEltSplatMask(Mask: MaskAsInts, NumSrcElts);
2204 }
2205
2206 /// Return true if all elements of this shuffle are the same value as the
2207 /// first element of exactly one source vector without changing the length
2208 /// of that vector.
2209 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2210 /// TODO: Optionally allow length-changing shuffles.
2211 /// TODO: Optionally allow splats from other elements.
2212 bool isZeroEltSplat() const {
2213 return !changesLength() &&
2214 isZeroEltSplatMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2215 }
2216
2217 /// Return true if this shuffle mask is a transpose mask.
2218 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2219 /// even- or odd-numbered vector elements from two n-dimensional source
2220 /// vectors and write each result into consecutive elements of an
2221 /// n-dimensional destination vector. Two shuffles are necessary to complete
2222 /// the transpose, one for the even elements and another for the odd elements.
2223 /// This description closely follows how the TRN1 and TRN2 AArch64
2224 /// instructions operate.
2225 ///
2226 /// For example, a simple 2x2 matrix can be transposed with:
2227 ///
2228 /// ; Original matrix
2229 /// m0 = < a, b >
2230 /// m1 = < c, d >
2231 ///
2232 /// ; Transposed matrix
2233 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2234 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2235 ///
2236 /// For matrices having greater than n columns, the resulting nx2 transposed
2237 /// matrix is stored in two result vectors such that one vector contains
2238 /// interleaved elements from all the even-numbered rows and the other vector
2239 /// contains interleaved elements from all the odd-numbered rows. For example,
2240 /// a 2x4 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b, c, d >
2244 /// m1 = < e, f, g, h >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2248 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2249 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2250 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2251 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2252 SmallVector<int, 16> MaskAsInts;
2253 getShuffleMask(Mask, Result&: MaskAsInts);
2254 return isTransposeMask(Mask: MaskAsInts, NumSrcElts);
2255 }
2256
2257 /// Return true if this shuffle transposes the elements of its inputs without
2258 /// changing the length of the vectors. This operation may also be known as a
2259 /// merge or interleave. See the description for isTransposeMask() for the
2260 /// exact specification.
2261 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2262 bool isTranspose() const {
2263 return !changesLength() && isTransposeMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2264 }
2265
2266 /// Return true if this shuffle mask is a splice mask, concatenating the two
2267 /// inputs together and then extracting an original width vector starting from
2268 /// the splice index.
2269 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2270 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2271 /// length as the mask.
2272 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2273 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2274 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2275 SmallVector<int, 16> MaskAsInts;
2276 getShuffleMask(Mask, Result&: MaskAsInts);
2277 return isSpliceMask(Mask: MaskAsInts, NumSrcElts, Index);
2278 }
2279
2280 /// Return true if this shuffle splices two inputs without changing the length
2281 /// of the vectors. This operation concatenates the two inputs together and
2282 /// then extracts an original width vector starting from the splice index.
2283 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2284 bool isSplice(int &Index) const {
2285 return !changesLength() &&
2286 isSpliceMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size(), Index);
2287 }
2288
2289 /// Return true if this shuffle mask is an extract subvector mask.
2290 /// A valid extract subvector mask returns a smaller vector from a single
2291 /// source operand. The base extraction index is returned as well.
2292 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2293 int &Index);
2294 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2295 int &Index) {
2296 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2297 // Not possible to express a shuffle mask for a scalable vector for this
2298 // case.
2299 if (isa<ScalableVectorType>(Val: Mask->getType()))
2300 return false;
2301 SmallVector<int, 16> MaskAsInts;
2302 getShuffleMask(Mask, Result&: MaskAsInts);
2303 return isExtractSubvectorMask(Mask: MaskAsInts, NumSrcElts, Index);
2304 }
2305
2306 /// Return true if this shuffle mask is an extract subvector mask.
2307 bool isExtractSubvectorMask(int &Index) const {
2308 // Not possible to express a shuffle mask for a scalable vector for this
2309 // case.
2310 if (isa<ScalableVectorType>(Val: getType()))
2311 return false;
2312
2313 int NumSrcElts =
2314 cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2315 return isExtractSubvectorMask(Mask: ShuffleMask, NumSrcElts, Index);
2316 }
2317
2318 /// Return true if this shuffle mask is an insert subvector mask.
2319 /// A valid insert subvector mask inserts the lowest elements of a second
2320 /// source operand into an in-place first source operand.
2321 /// Both the subvector width and the insertion index are returned.
2322 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2323 int &NumSubElts, int &Index);
2324 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2325 int &NumSubElts, int &Index) {
2326 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2327 // Not possible to express a shuffle mask for a scalable vector for this
2328 // case.
2329 if (isa<ScalableVectorType>(Val: Mask->getType()))
2330 return false;
2331 SmallVector<int, 16> MaskAsInts;
2332 getShuffleMask(Mask, Result&: MaskAsInts);
2333 return isInsertSubvectorMask(Mask: MaskAsInts, NumSrcElts, NumSubElts, Index);
2334 }
2335
2336 /// Return true if this shuffle mask is an insert subvector mask.
2337 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2338 // Not possible to express a shuffle mask for a scalable vector for this
2339 // case.
2340 if (isa<ScalableVectorType>(Val: getType()))
2341 return false;
2342
2343 int NumSrcElts =
2344 cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2345 return isInsertSubvectorMask(Mask: ShuffleMask, NumSrcElts, NumSubElts, Index);
2346 }
2347
2348 /// Return true if this shuffle mask replicates each of the \p VF elements
2349 /// in a vector \p ReplicationFactor times.
2350 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2351 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2352 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2353 int &VF);
2354 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2355 int &VF) {
2356 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2357 // Not possible to express a shuffle mask for a scalable vector for this
2358 // case.
2359 if (isa<ScalableVectorType>(Val: Mask->getType()))
2360 return false;
2361 SmallVector<int, 16> MaskAsInts;
2362 getShuffleMask(Mask, Result&: MaskAsInts);
2363 return isReplicationMask(Mask: MaskAsInts, ReplicationFactor, VF);
2364 }
2365
2366 /// Return true if this shuffle mask is a replication mask.
2367 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2368
2369 /// Return true if this shuffle mask represents "clustered" mask of size VF,
2370 /// i.e. each index between [0..VF) is used exactly once in each submask of
2371 /// size VF.
2372 /// For example, the mask for \p VF=4 is:
2373 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2374 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2375 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2376 /// element 3 is used twice in the second submask
2377 /// (3,3,1,0) and index 2 is not used at all.
2378 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2379
2380 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2381 /// mask.
2382 bool isOneUseSingleSourceMask(int VF) const;
2383
2384 /// Change values in a shuffle permute mask assuming the two vector operands
2385 /// of length InVecNumElts have swapped position.
2386 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2387 unsigned InVecNumElts) {
2388 for (int &Idx : Mask) {
2389 if (Idx == -1)
2390 continue;
2391 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2392 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2393 "shufflevector mask index out of range");
2394 }
2395 }
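  // Worked example (illustrative): with InVecNumElts = 4, the mask
  // <0, 5, -1, 2> becomes <4, 1, -1, 6>. Indices into the first operand now
  // address the second operand and vice versa; -1 (poison) entries are kept.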
2396
2397 /// Return true if this shuffle interleaves its two input vectors together.
2398 bool isInterleave(unsigned Factor);
2399
2400 /// Return true if the mask interleaves one or more input vectors together.
2401 ///
2402 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2403 /// E.g. For a Factor of 2 (LaneLen=4):
2404 /// <0, 4, 1, 5, 2, 6, 3, 7>
2405 /// E.g. For a Factor of 3 (LaneLen=4):
2406 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2407 /// E.g. For a Factor of 4 (LaneLen=2):
2408 /// <0, 2, 6, 4, 1, 3, 7, 5>
2409 ///
2410 /// NumInputElts is the total number of elements in the input vectors.
2411 ///
2412 /// StartIndexes are the first indexes of each vector being interleaved,
2413 /// substituting any indexes that were undef.
2414 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2415 ///
2416 /// Note that this does not check if the input vectors are consecutive:
2417 /// It will return true for masks such as
2418 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2419 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2420 unsigned NumInputElts,
2421 SmallVectorImpl<unsigned> &StartIndexes);
2422 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2423 unsigned NumInputElts) {
2424 SmallVector<unsigned, 8> StartIndexes;
2425 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2426 }
2427
2428 /// Checks if the shuffle is a bit rotation of the first operand across
2429 /// multiple subelements, e.g:
2430 ///
2431 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2432 ///
2433 /// could be expressed as
2434 ///
2435 /// rotl <4 x i16> %a, 8
2436 ///
2437 /// If it can be expressed as a rotation, returns the number of subelements to
2438 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2439 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2440 unsigned MinSubElts, unsigned MaxSubElts,
2441 unsigned &NumSubElts, unsigned &RotateAmt);
2442
2443 // Methods for support type inquiry through isa, cast, and dyn_cast:
2444 static bool classof(const Instruction *I) {
2445 return I->getOpcode() == Instruction::ShuffleVector;
2446 }
2447 static bool classof(const Value *V) {
2448 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2449 }
2450};
2451
2452template <>
2453struct OperandTraits<ShuffleVectorInst>
2454 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2455
2456DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2457
2458//===----------------------------------------------------------------------===//
2459// ExtractValueInst Class
2460//===----------------------------------------------------------------------===//
2461
2462/// This instruction extracts a struct member or array
2463/// element value from an aggregate value.
2464///
2465class ExtractValueInst : public UnaryInstruction {
2466 SmallVector<unsigned, 4> Indices;
2467
2468 ExtractValueInst(const ExtractValueInst &EVI);
2469
2470 /// Constructors - Create an extractvalue instruction with a base aggregate
2471 /// value and a list of indices. The first ctor can optionally insert before
2472 /// an existing instruction, the second appends the new instruction to the
2473 /// specified BasicBlock.
2474 inline ExtractValueInst(Value *Agg,
2475 ArrayRef<unsigned> Idxs,
2476 const Twine &NameStr,
2477 Instruction *InsertBefore);
2478 inline ExtractValueInst(Value *Agg,
2479 ArrayRef<unsigned> Idxs,
2480 const Twine &NameStr, BasicBlock *InsertAtEnd);
2481
2482 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2483
2484protected:
2485 // Note: Instruction needs to be a friend here to call cloneImpl.
2486 friend class Instruction;
2487
2488 ExtractValueInst *cloneImpl() const;
2489
2490public:
2491 static ExtractValueInst *Create(Value *Agg,
2492 ArrayRef<unsigned> Idxs,
2493 const Twine &NameStr = "",
2494 Instruction *InsertBefore = nullptr) {
2495 return new
2496 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2497 }
2498
2499 static ExtractValueInst *Create(Value *Agg,
2500 ArrayRef<unsigned> Idxs,
2501 const Twine &NameStr,
2502 BasicBlock *InsertAtEnd) {
2503 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2504 }
2505
2506 /// Returns the type of the element that would be extracted
2507 /// with an extractvalue instruction with the specified parameters.
2508 ///
2509 /// Null is returned if the indices are invalid for the specified type.
2510 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
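  // Worked example (illustrative): for the aggregate type
  //   { i32, { float, [4 x i8] } }
  // the index list {1, 1, 0} designates an i8 element, so getIndexedType
  // returns the i8 type; an out-of-range index list such as {3} yields null.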
2511
2512 using idx_iterator = const unsigned*;
2513
2514 inline idx_iterator idx_begin() const { return Indices.begin(); }
2515 inline idx_iterator idx_end() const { return Indices.end(); }
2516 inline iterator_range<idx_iterator> indices() const {
2517 return make_range(x: idx_begin(), y: idx_end());
2518 }
2519
2520 Value *getAggregateOperand() {
2521 return getOperand(i_nocapture: 0);
2522 }
2523 const Value *getAggregateOperand() const {
2524 return getOperand(i_nocapture: 0);
2525 }
2526 static unsigned getAggregateOperandIndex() {
2527 return 0U; // get index for modifying correct operand
2528 }
2529
2530 ArrayRef<unsigned> getIndices() const {
2531 return Indices;
2532 }
2533
2534 unsigned getNumIndices() const {
2535 return (unsigned)Indices.size();
2536 }
2537
2538 bool hasIndices() const {
2539 return true;
2540 }
2541
2542 // Methods for support type inquiry through isa, cast, and dyn_cast:
2543 static bool classof(const Instruction *I) {
2544 return I->getOpcode() == Instruction::ExtractValue;
2545 }
2546 static bool classof(const Value *V) {
2547 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2548 }
2549};
2550
2551ExtractValueInst::ExtractValueInst(Value *Agg,
2552 ArrayRef<unsigned> Idxs,
2553 const Twine &NameStr,
2554 Instruction *InsertBefore)
2555 : UnaryInstruction(checkGEPType(Ty: getIndexedType(Agg: Agg->getType(), Idxs)),
2556 ExtractValue, Agg, InsertBefore) {
2557 init(Idxs, NameStr);
2558}
2559
2560ExtractValueInst::ExtractValueInst(Value *Agg,
2561 ArrayRef<unsigned> Idxs,
2562 const Twine &NameStr,
2563 BasicBlock *InsertAtEnd)
2564 : UnaryInstruction(checkGEPType(Ty: getIndexedType(Agg: Agg->getType(), Idxs)),
2565 ExtractValue, Agg, InsertAtEnd) {
2566 init(Idxs, NameStr);
2567}
2568
2569//===----------------------------------------------------------------------===//
2570// InsertValueInst Class
2571//===----------------------------------------------------------------------===//
2572
2573 /// This instruction inserts a struct field or array element
2574/// value into an aggregate value.
2575///
2576class InsertValueInst : public Instruction {
2577 SmallVector<unsigned, 4> Indices;
2578
2579 InsertValueInst(const InsertValueInst &IVI);
2580
2581 /// Constructors - Create an insertvalue instruction with a base aggregate
2582 /// value, a value to insert, and a list of indices. The first ctor can
2583 /// optionally insert before an existing instruction, the second appends
2584 /// the new instruction to the specified BasicBlock.
2585 inline InsertValueInst(Value *Agg, Value *Val,
2586 ArrayRef<unsigned> Idxs,
2587 const Twine &NameStr,
2588 Instruction *InsertBefore);
2589 inline InsertValueInst(Value *Agg, Value *Val,
2590 ArrayRef<unsigned> Idxs,
2591 const Twine &NameStr, BasicBlock *InsertAtEnd);
2592
2593 /// Constructors - These two constructors are convenience methods because one
2594 /// and two index insertvalue instructions are so common.
2595 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2596 const Twine &NameStr = "",
2597 Instruction *InsertBefore = nullptr);
2598 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2599 BasicBlock *InsertAtEnd);
2600
2601 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2602 const Twine &NameStr);
2603
2604protected:
2605 // Note: Instruction needs to be a friend here to call cloneImpl.
2606 friend class Instruction;
2607
2608 InsertValueInst *cloneImpl() const;
2609
2610public:
2611 // allocate space for exactly two operands
2612 void *operator new(size_t S) { return User::operator new(Size: S, Us: 2); }
2613 void operator delete(void *Ptr) { User::operator delete(Usr: Ptr); }
2614
2615 static InsertValueInst *Create(Value *Agg, Value *Val,
2616 ArrayRef<unsigned> Idxs,
2617 const Twine &NameStr = "",
2618 Instruction *InsertBefore = nullptr) {
2619 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2620 }
2621
2622 static InsertValueInst *Create(Value *Agg, Value *Val,
2623 ArrayRef<unsigned> Idxs,
2624 const Twine &NameStr,
2625 BasicBlock *InsertAtEnd) {
2626 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2627 }
2628
2629 /// Transparently provide more efficient getOperand methods.
2630 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2631
2632 using idx_iterator = const unsigned*;
2633
2634 inline idx_iterator idx_begin() const { return Indices.begin(); }
2635 inline idx_iterator idx_end() const { return Indices.end(); }
2636 inline iterator_range<idx_iterator> indices() const {
2637 return make_range(x: idx_begin(), y: idx_end());
2638 }
2639
2640 Value *getAggregateOperand() {
2641 return getOperand(0);
2642 }
2643 const Value *getAggregateOperand() const {
2644 return getOperand(0);
2645 }
2646 static unsigned getAggregateOperandIndex() {
2647 return 0U; // get index for modifying correct operand
2648 }
2649
2650 Value *getInsertedValueOperand() {
2651 return getOperand(1);
2652 }
2653 const Value *getInsertedValueOperand() const {
2654 return getOperand(1);
2655 }
2656 static unsigned getInsertedValueOperandIndex() {
2657 return 1U; // get index for modifying correct operand
2658 }
2659
2660 ArrayRef<unsigned> getIndices() const {
2661 return Indices;
2662 }
2663
2664 unsigned getNumIndices() const {
2665 return (unsigned)Indices.size();
2666 }
2667
2668 bool hasIndices() const {
2669 return true;
2670 }
2671
2672 // Methods for support type inquiry through isa, cast, and dyn_cast:
2673 static bool classof(const Instruction *I) {
2674 return I->getOpcode() == Instruction::InsertValue;
2675 }
2676 static bool classof(const Value *V) {
2677 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2678 }
2679};
2680
2681template <>
2682struct OperandTraits<InsertValueInst> :
2683 public FixedNumOperandTraits<InsertValueInst, 2> {
2684};
2685
2686InsertValueInst::InsertValueInst(Value *Agg,
2687 Value *Val,
2688 ArrayRef<unsigned> Idxs,
2689 const Twine &NameStr,
2690 Instruction *InsertBefore)
2691 : Instruction(Agg->getType(), InsertValue,
2692 OperandTraits<InsertValueInst>::op_begin(U: this),
2693 2, InsertBefore) {
2694 init(Agg, Val, Idxs, NameStr);
2695}
2696
2697InsertValueInst::InsertValueInst(Value *Agg,
2698 Value *Val,
2699 ArrayRef<unsigned> Idxs,
2700 const Twine &NameStr,
2701 BasicBlock *InsertAtEnd)
2702 : Instruction(Agg->getType(), InsertValue,
2703 OperandTraits<InsertValueInst>::op_begin(U: this),
2704 2, InsertAtEnd) {
2705 init(Agg, Val, Idxs, NameStr);
2706}
2707
2708DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2709
2710//===----------------------------------------------------------------------===//
2711// PHINode Class
2712//===----------------------------------------------------------------------===//
2713
2714// PHINode - The PHINode class is used to represent the magical mystical PHI
2715// node, that can not exist in nature, but can be synthesized in a computer
2716// scientist's overactive imagination.
2717//
2718class PHINode : public Instruction {
2719 /// The number of operands actually allocated. NumOperands is
2720 /// the number actually in use.
2721 unsigned ReservedSpace;
2722
2723 PHINode(const PHINode &PN);
2724
2725 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2726 const Twine &NameStr = "",
2727 Instruction *InsertBefore = nullptr)
2728 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2729 ReservedSpace(NumReservedValues) {
2730 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2731 setName(NameStr);
2732 allocHungoffUses(N: ReservedSpace);
2733 }
2734
2735 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2736 BasicBlock *InsertAtEnd)
2737 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2738 ReservedSpace(NumReservedValues) {
2739 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2740 setName(NameStr);
2741 allocHungoffUses(N: ReservedSpace);
2742 }
2743
2744protected:
2745 // Note: Instruction needs to be a friend here to call cloneImpl.
2746 friend class Instruction;
2747
2748 PHINode *cloneImpl() const;
2749
2750 // allocHungoffUses - this is more complicated than the generic
2751 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2752 // values and pointers to the incoming blocks, all in one allocation.
2753 void allocHungoffUses(unsigned N) {
2754 User::allocHungoffUses(N, /* IsPhi */ IsPhi: true);
2755 }
2756
2757public:
2758 /// Constructors - NumReservedValues is a hint for the number of incoming
2759 /// edges that this phi node will have (use 0 if you really have no idea).
2760 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2761 const Twine &NameStr = "",
2762 Instruction *InsertBefore = nullptr) {
2763 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2764 }
2765
2766 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2767 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2768 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2769 }
2770
2771 /// Provide fast operand accessors
2772 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2773
2774 // Block iterator interface. This provides access to the list of incoming
2775 // basic blocks, which parallels the list of incoming values.
2776 // Please note that we are not providing non-const iterators for blocks to
2777 // force all updates to go through an interface function.
2778
2779 using block_iterator = BasicBlock **;
2780 using const_block_iterator = BasicBlock * const *;
2781
2782 const_block_iterator block_begin() const {
2783 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2784 }
2785
2786 const_block_iterator block_end() const {
2787 return block_begin() + getNumOperands();
2788 }
2789
2790 iterator_range<const_block_iterator> blocks() const {
2791 return make_range(x: block_begin(), y: block_end());
2792 }
2793
2794 op_range incoming_values() { return operands(); }
2795
2796 const_op_range incoming_values() const { return operands(); }
2797
2798 /// Return the number of incoming edges
2799 ///
2800 unsigned getNumIncomingValues() const { return getNumOperands(); }
2801
2802 /// Return incoming value number \p i.
2803 ///
2804 Value *getIncomingValue(unsigned i) const {
2805 return getOperand(i);
2806 }
2807 void setIncomingValue(unsigned i, Value *V) {
2808 assert(V && "PHI node got a null value!");
2809 assert(getType() == V->getType() &&
2810 "All operands to PHI node must be the same type as the PHI node!");
2811 setOperand(i, V);
2812 }
2813
2814 static unsigned getOperandNumForIncomingValue(unsigned i) {
2815 return i;
2816 }
2817
2818 static unsigned getIncomingValueNumForOperand(unsigned i) {
2819 return i;
2820 }
2821
2822 /// Return incoming basic block number @p i.
2823 ///
2824 BasicBlock *getIncomingBlock(unsigned i) const {
2825 return block_begin()[i];
2826 }
2827
2828 /// Return incoming basic block corresponding
2829 /// to an operand of the PHI.
2830 ///
2831 BasicBlock *getIncomingBlock(const Use &U) const {
2832 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2833 return getIncomingBlock(i: unsigned(&U - op_begin()));
2834 }
2835
2836 /// Return incoming basic block corresponding
2837 /// to value use iterator.
2838 ///
2839 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2840 return getIncomingBlock(U: I.getUse());
2841 }
2842
2843 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2844 const_cast<block_iterator>(block_begin())[i] = BB;
2845 }
2846
2847 /// Copies the basic blocks from \p BBRange to the incoming basic block list
2848 /// of this PHINode, starting at \p ToIdx.
2849 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
2850 uint32_t ToIdx = 0) {
2851 copy(Range&: BBRange, Out: const_cast<block_iterator>(block_begin()) + ToIdx);
2852 }
2853
2854 /// Replace every incoming basic block \p Old with basic block \p New.
2855 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2856 assert(New && Old && "PHI node got a null basic block!");
2857 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2858 if (getIncomingBlock(i: Op) == Old)
2859 setIncomingBlock(i: Op, BB: New);
2860 }
2861
2862 /// Add an incoming value to the end of the PHI list
2863 ///
2864 void addIncoming(Value *V, BasicBlock *BB) {
2865 if (getNumOperands() == ReservedSpace)
2866 growOperands(); // Get more space!
2867 // Initialize some new operands.
2868 setNumHungOffUseOperands(getNumOperands() + 1);
2869 setIncomingValue(i: getNumOperands() - 1, V);
2870 setIncomingBlock(i: getNumOperands() - 1, BB);
2871 }
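  // Minimal usage sketch (block and value names assumed): a two-predecessor
  // phi merging values coming from 'ThenBB' and 'ElseBB'.
  //   PHINode *Phi = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge",
  //                                  &MergeBB->front());
  //   Phi->addIncoming(ThenV, ThenBB);
  //   Phi->addIncoming(ElseV, ElseBB);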
2872
2873 /// Remove an incoming value. This is useful if a
2874 /// predecessor basic block is deleted. The value removed is returned.
2875 ///
2876 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2877 /// is true), the PHI node is destroyed and any uses of it are replaced with
2878 /// dummy values. The only time there should be zero incoming values to a PHI
2879 /// node is when the block is dead, so this strategy is sound.
2880 ///
2881 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2882
2883 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2884 int Idx = getBasicBlockIndex(BB);
2885 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2886 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2887 }
2888
2889 /// Remove all incoming values for which the predicate returns true.
2890 /// The predicate accepts the incoming value index.
2891 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
2892 bool DeletePHIIfEmpty = true);
2893
2894 /// Return the first index of the specified basic
2895 /// block in the value list for this PHI. Returns -1 if no instance.
2896 ///
2897 int getBasicBlockIndex(const BasicBlock *BB) const {
2898 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2899 if (block_begin()[i] == BB)
2900 return i;
2901 return -1;
2902 }
2903
2904 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2905 int Idx = getBasicBlockIndex(BB);
2906 assert(Idx >= 0 && "Invalid basic block argument!");
2907 return getIncomingValue(i: Idx);
2908 }
2909
2910 /// Set every incoming value for block \p BB to \p V.
2911 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2912 assert(BB && "PHI node got a null basic block!");
2913 bool Found = false;
2914 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2915 if (getIncomingBlock(Op) == BB) {
2916 Found = true;
2917 setIncomingValue(Op, V);
2918 }
2919 (void)Found;
2920 assert(Found && "Invalid basic block argument to set!");
2921 }
2922
2923 /// If the specified PHI node always merges together the
2924 /// same value, return the value, otherwise return null.
2925 Value *hasConstantValue() const;
2926
2927 /// Whether the specified PHI node always merges
2928 /// together the same value, assuming undefs are equal to a unique
2929 /// non-undef value.
2930 bool hasConstantOrUndefValue() const;
2931
2932 /// Return true if this PHI node is complete, i.e. every predecessor of its
2933 /// parent block has an incoming value in this PHI; otherwise return false.
2934 bool isComplete() const {
2935 return llvm::all_of(predecessors(getParent()),
2936 [this](const BasicBlock *Pred) {
2937 return getBasicBlockIndex(Pred) >= 0;
2938 });
2939 }
2940
2941 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2942 static bool classof(const Instruction *I) {
2943 return I->getOpcode() == Instruction::PHI;
2944 }
2945 static bool classof(const Value *V) {
2946 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2947 }
2948
2949private:
2950 void growOperands();
2951};
2952
2953template <>
2954struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2955};
2956
2957DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
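
// Illustrative usage sketch (not part of the original header): building a
// two-predecessor PHI node with the API above. `Ctx`, `MergeBB`, `ThenBB`,
// `ElseBB`, `ThenVal` and `ElseVal` are assumed to exist already; the node is
// inserted at the top of `MergeBB`, where PHIs must live.
//
//   PHINode *PN = PHINode::Create(Type::getInt32Ty(Ctx), /*NumReservedValues=*/2,
//                                 "merge", /*InsertBefore=*/&MergeBB->front());
//   PN->addIncoming(ThenVal, ThenBB);
//   PN->addIncoming(ElseVal, ElseBB);
//   assert(PN->isComplete() && "each predecessor now has an incoming value");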
2958
2959//===----------------------------------------------------------------------===//
2960// LandingPadInst Class
2961//===----------------------------------------------------------------------===//
2962
2963//===---------------------------------------------------------------------------
2964/// The landingpad instruction holds all of the information
2965/// necessary to generate correct exception handling. The landingpad instruction
2966/// cannot be moved from the top of a landing pad block, which itself is
2967/// accessible only from the 'unwind' edge of an invoke. This uses the
2968/// SubclassData field in Value to store whether or not the landingpad is a
2969/// cleanup.
2970///
2971class LandingPadInst : public Instruction {
2972 using CleanupField = BoolBitfieldElementT<0>;
2973
2974 /// The number of operands actually allocated. NumOperands is
2975 /// the number actually in use.
2976 unsigned ReservedSpace;
2977
2978 LandingPadInst(const LandingPadInst &LP);
2979
2980public:
2981 enum ClauseType { Catch, Filter };
2982
2983private:
2984 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2985 const Twine &NameStr, Instruction *InsertBefore);
2986 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2987 const Twine &NameStr, BasicBlock *InsertAtEnd);
2988
2989 // Allocate space for exactly zero operands.
2990 void *operator new(size_t S) { return User::operator new(S); }
2991
2992 void growOperands(unsigned Size);
2993 void init(unsigned NumReservedValues, const Twine &NameStr);
2994
2995protected:
2996 // Note: Instruction needs to be a friend here to call cloneImpl.
2997 friend class Instruction;
2998
2999 LandingPadInst *cloneImpl() const;
3000
3001public:
3002 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3003
3004 /// Constructors - NumReservedClauses is a hint for the number of incoming
3005 /// clauses that this landingpad will have (use 0 if you really have no idea).
3006 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3007 const Twine &NameStr = "",
3008 Instruction *InsertBefore = nullptr);
3009 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3010 const Twine &NameStr, BasicBlock *InsertAtEnd);
3011
3012 /// Provide fast operand accessors
3013 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3014
3015 /// Return 'true' if this landingpad instruction is a
3016 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3017 /// doesn't catch the exception.
3018 bool isCleanup() const { return getSubclassData<CleanupField>(); }
3019
3020 /// Indicate that this landingpad instruction is a cleanup.
3021 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3022
3023 /// Add a catch or filter clause to the landing pad.
3024 void addClause(Constant *ClauseVal);
3025
3026 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3027 /// determine what type of clause this is.
3028 Constant *getClause(unsigned Idx) const {
3029 return cast<Constant>(getOperandList()[Idx]);
3030 }
3031
3032 /// Return 'true' if the clause at index Idx is a catch clause.
3033 bool isCatch(unsigned Idx) const {
3034 return !isa<ArrayType>(getOperandList()[Idx]->getType());
3035 }
3036
3037 /// Return 'true' if the clause at index Idx is a filter clause.
3038 bool isFilter(unsigned Idx) const {
3039 return isa<ArrayType>(getOperandList()[Idx]->getType());
3040 }
3041
3042 /// Get the number of clauses for this landing pad.
3043 unsigned getNumClauses() const { return getNumOperands(); }
3044
3045 /// Grow the size of the operand list to accommodate the new
3046 /// number of clauses.
3047 void reserveClauses(unsigned Size) { growOperands(Size); }
3048
3049 // Methods for support type inquiry through isa, cast, and dyn_cast:
3050 static bool classof(const Instruction *I) {
3051 return I->getOpcode() == Instruction::LandingPad;
3052 }
3053 static bool classof(const Value *V) {
3054 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3055 }
3056};
3057
3058template <>
3059struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3060};
3061
3062DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
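
// Illustrative usage sketch (not part of the original header): a landingpad
// that catches one type and also acts as a cleanup. `Token` (the exception
// struct type), `TypeInfo` (a type-info global) and `LPadBB` (the unwind
// destination block of an invoke, in a function with a personality) are
// assumed to exist.
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(Token, /*NumReservedClauses=*/1, "lpad",
//                              /*InsertBefore=*/&LPadBB->front());
//   LP->addClause(TypeInfo); // non-array type => catch clause
//   LP->setCleanup(true);    // also run cleanup code while unwinding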
3063
3064//===----------------------------------------------------------------------===//
3065// ReturnInst Class
3066//===----------------------------------------------------------------------===//
3067
3068//===---------------------------------------------------------------------------
3069 /// Return a value (possibly void) from a function. Execution
3070/// does not continue in this function any longer.
3071///
3072class ReturnInst : public Instruction {
3073 ReturnInst(const ReturnInst &RI);
3074
3075private:
3076 // ReturnInst constructors:
3077 // ReturnInst() - 'ret void' instruction
3078 // ReturnInst( null) - 'ret void' instruction
3079 // ReturnInst(Value* X) - 'ret X' instruction
3080 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3081 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3082 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3083 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3084 //
3085 // NOTE: If the Value* passed is of type void then the constructor behaves as
3086 // if it was passed NULL.
3087 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3088 Instruction *InsertBefore = nullptr);
3089 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3090 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3091
3092protected:
3093 // Note: Instruction needs to be a friend here to call cloneImpl.
3094 friend class Instruction;
3095
3096 ReturnInst *cloneImpl() const;
3097
3098public:
3099 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3100 Instruction *InsertBefore = nullptr) {
3101 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3102 }
3103
3104 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3105 BasicBlock *InsertAtEnd) {
3106 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3107 }
3108
3109 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3110 return new(0) ReturnInst(C, InsertAtEnd);
3111 }
3112
3113 /// Provide fast operand accessors
3114 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3115
3116 /// Convenience accessor. Returns null if there is no return value.
3117 Value *getReturnValue() const {
3118 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3119 }
3120
3121 unsigned getNumSuccessors() const { return 0; }
3122
3123 // Methods for support type inquiry through isa, cast, and dyn_cast:
3124 static bool classof(const Instruction *I) {
3125 return (I->getOpcode() == Instruction::Ret);
3126 }
3127 static bool classof(const Value *V) {
3128 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3129 }
3130
3131private:
3132 BasicBlock *getSuccessor(unsigned idx) const {
3133 llvm_unreachable("ReturnInst has no successors!");
3134 }
3135
3136 void setSuccessor(unsigned idx, BasicBlock *B) {
3137 llvm_unreachable("ReturnInst has no successors!");
3138 }
3139};
3140
3141template <>
3142struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3143};
3144
3145DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
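
// Illustrative usage sketch (not part of the original header): terminating a
// block with a return. `Ctx` is the LLVMContext, `BB` an assumed block and
// `X` an assumed i32 value.
//
//   ReturnInst::Create(Ctx, X, BB); // 'ret i32 %X', appended to BB
//   ReturnInst::Create(Ctx, BB);    // 'ret void', for a void-returning function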
3146
3147//===----------------------------------------------------------------------===//
3148// BranchInst Class
3149//===----------------------------------------------------------------------===//
3150
3151//===---------------------------------------------------------------------------
3152/// Conditional or Unconditional Branch instruction.
3153///
3154class BranchInst : public Instruction {
3155 /// Ops list - Branches are strange. The operands are ordered:
3156 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3157 /// they don't have to check for cond/uncond branchness. These are mostly
3158 /// accessed relative from op_end().
3159 BranchInst(const BranchInst &BI);
3160 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3161 // BranchInst(BB *B) - 'br B'
3162 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3163 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3164 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3165 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3166 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3167 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3168 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3169 Instruction *InsertBefore = nullptr);
3170 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3171 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3172 BasicBlock *InsertAtEnd);
3173
3174 void AssertOK();
3175
3176protected:
3177 // Note: Instruction needs to be a friend here to call cloneImpl.
3178 friend class Instruction;
3179
3180 BranchInst *cloneImpl() const;
3181
3182public:
3183 /// Iterator type that casts an operand to a basic block.
3184 ///
3185 /// This only makes sense because the successors are stored as adjacent
3186 /// operands for branch instructions.
3187 struct succ_op_iterator
3188 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3189 std::random_access_iterator_tag, BasicBlock *,
3190 ptrdiff_t, BasicBlock *, BasicBlock *> {
3191 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3192
3193 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3194 BasicBlock *operator->() const { return operator*(); }
3195 };
3196
3197 /// The const version of `succ_op_iterator`.
3198 struct const_succ_op_iterator
3199 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3200 std::random_access_iterator_tag,
3201 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3202 const BasicBlock *> {
3203 explicit const_succ_op_iterator(const_value_op_iterator I)
3204 : iterator_adaptor_base(I) {}
3205
3206 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3207 const BasicBlock *operator->() const { return operator*(); }
3208 };
3209
3210 static BranchInst *Create(BasicBlock *IfTrue,
3211 Instruction *InsertBefore = nullptr) {
3212 return new(1) BranchInst(IfTrue, InsertBefore);
3213 }
3214
3215 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3216 Value *Cond, Instruction *InsertBefore = nullptr) {
3217 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3218 }
3219
3220 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3221 return new(1) BranchInst(IfTrue, InsertAtEnd);
3222 }
3223
3224 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3225 Value *Cond, BasicBlock *InsertAtEnd) {
3226 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3227 }
3228
3229 /// Transparently provide more efficient getOperand methods.
3230 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3231
3232 bool isUnconditional() const { return getNumOperands() == 1; }
3233 bool isConditional() const { return getNumOperands() == 3; }
3234
3235 Value *getCondition() const {
3236 assert(isConditional() && "Cannot get condition of an uncond branch!");
3237 return Op<-3>();
3238 }
3239
3240 void setCondition(Value *V) {
3241 assert(isConditional() && "Cannot set condition of unconditional branch!");
3242 Op<-3>() = V;
3243 }
3244
3245 unsigned getNumSuccessors() const { return 1+isConditional(); }
3246
3247 BasicBlock *getSuccessor(unsigned i) const {
3248 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3249 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3250 }
3251
3252 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3253 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3254 *(&Op<-1>() - idx) = NewSucc;
3255 }
3256
3257 /// Swap the successors of this branch instruction.
3258 ///
3259 /// Swaps the successors of the branch instruction. This also swaps any
3260 /// branch weight metadata associated with the instruction so that it
3261 /// continues to map correctly to each operand.
3262 void swapSuccessors();
3263
3264 iterator_range<succ_op_iterator> successors() {
3265 return make_range(
3266 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3267 succ_op_iterator(value_op_end()));
3268 }
3269
3270 iterator_range<const_succ_op_iterator> successors() const {
3271 return make_range(const_succ_op_iterator(
3272 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3273 const_succ_op_iterator(value_op_end()));
3274 }
3275
3276 // Methods for support type inquiry through isa, cast, and dyn_cast:
3277 static bool classof(const Instruction *I) {
3278 return (I->getOpcode() == Instruction::Br);
3279 }
3280 static bool classof(const Value *V) {
3281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3282 }
3283};
3284
3285template <>
3286struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3287};
3288
3289DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
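
// Illustrative usage sketch (not part of the original header): emitting an
// unconditional and a conditional branch. `Cond` (an i1 value) and the blocks
// `EntryBB`, `ThenBB`, `ElseBB` and `JoinBB` are assumed to exist.
//
//   BranchInst::Create(JoinBB, ThenBB); // 'br %JoinBB', appended to ThenBB
//   BranchInst *BI = BranchInst::Create(ThenBB, ElseBB, Cond, EntryBB);
//   for (BasicBlock *Succ : BI->successors())
//     (void)Succ; // visits ThenBB, then ElseBB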
3290
3291//===----------------------------------------------------------------------===//
3292// SwitchInst Class
3293//===----------------------------------------------------------------------===//
3294
3295//===---------------------------------------------------------------------------
3296/// Multiway switch
3297///
3298class SwitchInst : public Instruction {
3299 unsigned ReservedSpace;
3300
3301 // Operand[0] = Value to switch on
3302 // Operand[1] = Default basic block destination
3303 // Operand[2n ] = Value to match
3304 // Operand[2n+1] = BasicBlock to go to on match
3305 SwitchInst(const SwitchInst &SI);
3306
3307 /// Create a new switch instruction, specifying a value to switch on and a
3308 /// default destination. The number of additional cases can be specified here
3309 /// to make memory allocation more efficient. This constructor can also
3310 /// auto-insert before another instruction.
3311 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3312 Instruction *InsertBefore);
3313
3314 /// Create a new switch instruction, specifying a value to switch on and a
3315 /// default destination. The number of additional cases can be specified here
3316 /// to make memory allocation more efficient. This constructor also
3317 /// auto-inserts at the end of the specified BasicBlock.
3318 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3319 BasicBlock *InsertAtEnd);
3320
3321 // allocate space for exactly zero operands
3322 void *operator new(size_t S) { return User::operator new(S); }
3323
3324 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3325 void growOperands();
3326
3327protected:
3328 // Note: Instruction needs to be a friend here to call cloneImpl.
3329 friend class Instruction;
3330
3331 SwitchInst *cloneImpl() const;
3332
3333public:
3334 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3335
3336 // -2 when interpreted as signed; used as a pseudo-index for the default case.
3337 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3338
3339 template <typename CaseHandleT> class CaseIteratorImpl;
3340
3341 /// A handle to a particular switch case. It exposes a convenient interface
3342 /// to both the case value and the successor block.
3343 ///
3344 /// We define this as a template and instantiate it to form both a const and
3345 /// non-const handle.
3346 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3347 class CaseHandleImpl {
3348 // Directly befriend both const and non-const iterators.
3349 friend class SwitchInst::CaseIteratorImpl<
3350 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3351
3352 protected:
3353 // Expose the switch type we're parameterized with to the iterator.
3354 using SwitchInstType = SwitchInstT;
3355
3356 SwitchInstT *SI;
3357 ptrdiff_t Index;
3358
3359 CaseHandleImpl() = default;
3360 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3361
3362 public:
3363 /// Resolves case value for current case.
3364 ConstantIntT *getCaseValue() const {
3365 assert((unsigned)Index < SI->getNumCases() &&
3366 "Index out the number of cases.");
3367 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3368 }
3369
3370 /// Resolves successor for current case.
3371 BasicBlockT *getCaseSuccessor() const {
3372 assert(((unsigned)Index < SI->getNumCases() ||
3373 (unsigned)Index == DefaultPseudoIndex) &&
3374 "Index out the number of cases.");
3375 return SI->getSuccessor(getSuccessorIndex());
3376 }
3377
3378 /// Returns number of current case.
3379 unsigned getCaseIndex() const { return Index; }
3380
3381 /// Returns successor index for current case successor.
3382 unsigned getSuccessorIndex() const {
3383 assert(((unsigned)Index == DefaultPseudoIndex ||
3384 (unsigned)Index < SI->getNumCases()) &&
3385 "Index out the number of cases.");
3386 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3387 }
3388
3389 bool operator==(const CaseHandleImpl &RHS) const {
3390 assert(SI == RHS.SI && "Incompatible operators.");
3391 return Index == RHS.Index;
3392 }
3393 };
3394
3395 using ConstCaseHandle =
3396 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3397
3398 class CaseHandle
3399 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3400 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3401
3402 public:
3403 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3404
3405 /// Sets the new value for current case.
3406 void setValue(ConstantInt *V) const {
3407 assert((unsigned)Index < SI->getNumCases() &&
3408 "Index out the number of cases.");
3409 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3410 }
3411
3412 /// Sets the new successor for current case.
3413 void setSuccessor(BasicBlock *S) const {
3414 SI->setSuccessor(getSuccessorIndex(), S);
3415 }
3416 };
3417
3418 template <typename CaseHandleT>
3419 class CaseIteratorImpl
3420 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3421 std::random_access_iterator_tag,
3422 const CaseHandleT> {
3423 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3424
3425 CaseHandleT Case;
3426
3427 public:
3428 /// Default constructed iterator is in an invalid state until assigned to
3429 /// a case for a particular switch.
3430 CaseIteratorImpl() = default;
3431
3432 /// Initializes case iterator for given SwitchInst and for given
3433 /// case number.
3434 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3435
3436 /// Initializes case iterator for given SwitchInst and for given
3437 /// successor index.
3438 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3439 unsigned SuccessorIndex) {
3440 assert(SuccessorIndex < SI->getNumSuccessors() &&
3441 "Successor index # out of range!");
3442 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3443 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3444 }
3445
3446 /// Support converting to the const variant. This will be a no-op for const
3447 /// variant.
3448 operator CaseIteratorImpl<ConstCaseHandle>() const {
3449 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3450 }
3451
3452 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3453 // Check index correctness after addition.
3454 // Note: Index == getNumCases() means end().
3455 assert(Case.Index + N >= 0 &&
3456 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3457 "Case.Index out the number of cases.");
3458 Case.Index += N;
3459 return *this;
3460 }
3461 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3462 // Check index correctness after subtraction.
3463 // Note: Case.Index == getNumCases() means end().
3464 assert(Case.Index - N >= 0 &&
3465 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3466 "Case.Index out the number of cases.");
3467 Case.Index -= N;
3468 return *this;
3469 }
3470 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3471 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3472 return Case.Index - RHS.Case.Index;
3473 }
3474 bool operator==(const CaseIteratorImpl &RHS) const {
3475 return Case == RHS.Case;
3476 }
3477 bool operator<(const CaseIteratorImpl &RHS) const {
3478 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3479 return Case.Index < RHS.Case.Index;
3480 }
3481 const CaseHandleT &operator*() const { return Case; }
3482 };
3483
3484 using CaseIt = CaseIteratorImpl<CaseHandle>;
3485 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3486
3487 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3488 unsigned NumCases,
3489 Instruction *InsertBefore = nullptr) {
3490 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3491 }
3492
3493 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3494 unsigned NumCases, BasicBlock *InsertAtEnd) {
3495 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3496 }
3497
3498 /// Provide fast operand accessors
3499 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3500
3501 // Accessor Methods for Switch stmt
3502 Value *getCondition() const { return getOperand(0); }
3503 void setCondition(Value *V) { setOperand(0, V); }
3504
3505 BasicBlock *getDefaultDest() const {
3506 return cast<BasicBlock>(getOperand(1));
3507 }
3508
3509 /// Returns true if the default branch must result in immediate undefined
3510 /// behavior, false otherwise.
3511 bool defaultDestUndefined() const {
3512 return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3513 }
3514
3515 void setDefaultDest(BasicBlock *DefaultCase) {
3516 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3517 }
3518
3519 /// Return the number of 'cases' in this switch instruction, excluding the
3520 /// default case.
3521 unsigned getNumCases() const {
3522 return getNumOperands()/2 - 1;
3523 }
3524
3525 /// Returns a read/write iterator that points to the first case in the
3526 /// SwitchInst.
3527 CaseIt case_begin() {
3528 return CaseIt(this, 0);
3529 }
3530
3531 /// Returns a read-only iterator that points to the first case in the
3532 /// SwitchInst.
3533 ConstCaseIt case_begin() const {
3534 return ConstCaseIt(this, 0);
3535 }
3536
3537 /// Returns a read/write iterator that points one past the last in the
3538 /// SwitchInst.
3539 CaseIt case_end() {
3540 return CaseIt(this, getNumCases());
3541 }
3542
3543 /// Returns a read-only iterator that points one past the last in the
3544 /// SwitchInst.
3545 ConstCaseIt case_end() const {
3546 return ConstCaseIt(this, getNumCases());
3547 }
3548
3549 /// Iteration adapter for range-for loops.
3550 iterator_range<CaseIt> cases() {
3551 return make_range(case_begin(), case_end());
3552 }
3553
3554 /// Constant iteration adapter for range-for loops.
3555 iterator_range<ConstCaseIt> cases() const {
3556 return make_range(case_begin(), case_end());
3557 }
3558
3559 /// Returns an iterator that points to the default case.
3560 /// Note: this iterator can only resolve the successor; attempting to
3561 /// resolve the case value triggers an assertion.
3562 /// Also note that incrementing or decrementing this iterator triggers an
3563 /// assertion and leaves the iterator invalid.
3564 CaseIt case_default() {
3565 return CaseIt(this, DefaultPseudoIndex);
3566 }
3567 ConstCaseIt case_default() const {
3568 return ConstCaseIt(this, DefaultPseudoIndex);
3569 }
3570
3571 /// Search all of the case values for the specified constant. If it is
3572 /// explicitly handled, return the case iterator of it, otherwise return
3573 /// default case iterator to indicate that it is handled by the default
3574 /// handler.
3575 CaseIt findCaseValue(const ConstantInt *C) {
3576 return CaseIt(
3577 this,
3578 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3579 }
3580 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3581 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3582 return Case.getCaseValue() == C;
3583 });
3584 if (I != case_end())
3585 return I;
3586
3587 return case_default();
3588 }
3589
3590 /// Finds the unique case value for a given successor. Returns null if the
3591 /// successor is not found, not unique, or is the default case.
3592 ConstantInt *findCaseDest(BasicBlock *BB) {
3593 if (BB == getDefaultDest())
3594 return nullptr;
3595
3596 ConstantInt *CI = nullptr;
3597 for (auto Case : cases()) {
3598 if (Case.getCaseSuccessor() != BB)
3599 continue;
3600
3601 if (CI)
3602 return nullptr; // Multiple cases lead to BB.
3603
3604 CI = Case.getCaseValue();
3605 }
3606
3607 return CI;
3608 }
3609
3610 /// Add an entry to the switch instruction.
3611 /// Note:
3612 /// This action invalidates case_end(). Old case_end() iterator will
3613 /// point to the added case.
3614 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3615
3616 /// This method removes the specified case and its successor from the switch
3617 /// instruction. Note that this operation may reorder the remaining cases at
3618 /// index idx and above.
3619 /// Note:
3620 /// This action invalidates iterators for all cases following the one removed,
3621 /// including the case_end() iterator. It returns an iterator for the next
3622 /// case.
3623 CaseIt removeCase(CaseIt I);
3624
3625 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3626 BasicBlock *getSuccessor(unsigned idx) const {
3627 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
3628 return cast<BasicBlock>(getOperand(idx*2+1));
3629 }
3630 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3631 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3632 setOperand(idx * 2 + 1, NewSucc);
3633 }
3634
3635 // Methods for support type inquiry through isa, cast, and dyn_cast:
3636 static bool classof(const Instruction *I) {
3637 return I->getOpcode() == Instruction::Switch;
3638 }
3639 static bool classof(const Value *V) {
3640 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3641 }
3642};
3643
3644/// A wrapper class to simplify modification of SwitchInst cases along with
3645/// their prof branch_weights metadata.
3646class SwitchInstProfUpdateWrapper {
3647 SwitchInst &SI;
3648 std::optional<SmallVector<uint32_t, 8>> Weights;
3649 bool Changed = false;
3650
3651protected:
3652 MDNode *buildProfBranchWeightsMD();
3653
3654 void init();
3655
3656public:
3657 using CaseWeightOpt = std::optional<uint32_t>;
3658 SwitchInst *operator->() { return &SI; }
3659 SwitchInst &operator*() { return SI; }
3660 operator SwitchInst *() { return &SI; }
3661
3662 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3663
3664 ~SwitchInstProfUpdateWrapper() {
3665 if (Changed)
3666 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3667 }
3668
3669 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3670 /// the corresponding branch weight.
3671 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3672
3673 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3674 /// specified branch weight for the added case.
3675 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3676
3677 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3678 /// this object so that it does not touch the underlying SwitchInst in its destructor.
3679 Instruction::InstListType::iterator eraseFromParent();
3680
3681 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3682 CaseWeightOpt getSuccessorWeight(unsigned idx);
3683
3684 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3685};
3686
3687template <>
3688struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3689};
3690
3691DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3692
3693//===----------------------------------------------------------------------===//
3694// IndirectBrInst Class
3695//===----------------------------------------------------------------------===//
3696
3697//===---------------------------------------------------------------------------
3698/// Indirect Branch Instruction.
3699///
3700class IndirectBrInst : public Instruction {
3701 unsigned ReservedSpace;
3702
3703 // Operand[0] = Address to jump to
3704 // Operand[n+1] = n-th destination
3705 IndirectBrInst(const IndirectBrInst &IBI);
3706
3707 /// Create a new indirectbr instruction, specifying an
3708 /// Address to jump to. The number of expected destinations can be specified
3709 /// here to make memory allocation more efficient. This constructor can also
3710 /// autoinsert before another instruction.
3711 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3712
3713 /// Create a new indirectbr instruction, specifying an
3714 /// Address to jump to. The number of expected destinations can be specified
3715 /// here to make memory allocation more efficient. This constructor also
3716 /// autoinserts at the end of the specified BasicBlock.
3717 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3718
3719 // allocate space for exactly zero operands
3720 void *operator new(size_t S) { return User::operator new(S); }
3721
3722 void init(Value *Address, unsigned NumDests);
3723 void growOperands();
3724
3725protected:
3726 // Note: Instruction needs to be a friend here to call cloneImpl.
3727 friend class Instruction;
3728
3729 IndirectBrInst *cloneImpl() const;
3730
3731public:
3732 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3733
3734 /// Iterator type that casts an operand to a basic block.
3735 ///
3736 /// This only makes sense because the successors are stored as adjacent
3737 /// operands for indirectbr instructions.
3738 struct succ_op_iterator
3739 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3740 std::random_access_iterator_tag, BasicBlock *,
3741 ptrdiff_t, BasicBlock *, BasicBlock *> {
3742 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3743
3744 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3745 BasicBlock *operator->() const { return operator*(); }
3746 };
3747
3748 /// The const version of `succ_op_iterator`.
3749 struct const_succ_op_iterator
3750 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3751 std::random_access_iterator_tag,
3752 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3753 const BasicBlock *> {
3754 explicit const_succ_op_iterator(const_value_op_iterator I)
3755 : iterator_adaptor_base(I) {}
3756
3757 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3758 const BasicBlock *operator->() const { return operator*(); }
3759 };
3760
3761 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3762 Instruction *InsertBefore = nullptr) {
3763 return new IndirectBrInst(Address, NumDests, InsertBefore);
3764 }
3765
3766 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3767 BasicBlock *InsertAtEnd) {
3768 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3769 }
3770
3771 /// Provide fast operand accessors.
3772 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3773
3774 // Accessor Methods for IndirectBrInst instruction.
3775 Value *getAddress() { return getOperand(0); }
3776 const Value *getAddress() const { return getOperand(0); }
3777 void setAddress(Value *V) { setOperand(0, V); }
3778
3779 /// Return the number of possible destinations in this
3780 /// indirectbr instruction.
3781 unsigned getNumDestinations() const { return getNumOperands()-1; }
3782
3783 /// Return the specified destination.
3784 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3785 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3786
3787 /// Add a destination.
3788 ///
3789 void addDestination(BasicBlock *Dest);
3790
3791 /// This method removes the specified successor from the
3792 /// indirectbr instruction.
3793 void removeDestination(unsigned i);
3794
3795 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3796 BasicBlock *getSuccessor(unsigned i) const {
3797 return cast<BasicBlock>(getOperand(i+1));
3798 }
3799 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3800 setOperand(i + 1, NewSucc);
3801 }
3802
3803 iterator_range<succ_op_iterator> successors() {
3804 return make_range(succ_op_iterator(std::next(value_op_begin())),
3805 succ_op_iterator(value_op_end()));
3806 }
3807
3808 iterator_range<const_succ_op_iterator> successors() const {
3809 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3810 const_succ_op_iterator(value_op_end()));
3811 }
3812
3813 // Methods for support type inquiry through isa, cast, and dyn_cast:
3814 static bool classof(const Instruction *I) {
3815 return I->getOpcode() == Instruction::IndirectBr;
3816 }
3817 static bool classof(const Value *V) {
3818 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3819 }
3820};
3821
3822template <>
3823struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3824};
3825
3826DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
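
// Illustrative usage sketch (not part of the original header): an indirectbr
// through a block address, as produced for computed gotos. `TargetBB1`,
// `TargetBB2` (blocks of the current function) and `CurBB` are assumed to
// exist; BlockAddress is declared in llvm/IR/Constants.h.
//
//   Value *Addr = BlockAddress::get(TargetBB1);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, CurBB);
//   IBI->addDestination(TargetBB1);
//   IBI->addDestination(TargetBB2);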
3827
3828//===----------------------------------------------------------------------===//
3829// InvokeInst Class
3830//===----------------------------------------------------------------------===//
3831
3832/// Invoke instruction. The SubclassData field is used to hold the
3833/// calling convention of the call.
3834///
3835class InvokeInst : public CallBase {
3836 /// The number of operands for this call beyond the called function,
3837 /// arguments, and operand bundles.
3838 static constexpr int NumExtraOperands = 2;
3839
3840 /// The index from the end of the operand array to the normal destination.
3841 static constexpr int NormalDestOpEndIdx = -3;
3842
3843 /// The index from the end of the operand array to the unwind destination.
3844 static constexpr int UnwindDestOpEndIdx = -2;
3845
3846 InvokeInst(const InvokeInst &BI);
3847
3848 /// Construct an InvokeInst from a range of arguments.
3849 ///
3851 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3852 BasicBlock *IfException, ArrayRef<Value *> Args,
3853 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3854 const Twine &NameStr, Instruction *InsertBefore);
3855
3856 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3857 BasicBlock *IfException, ArrayRef<Value *> Args,
3858 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3859 const Twine &NameStr, BasicBlock *InsertAtEnd);
3860
3861 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3862 BasicBlock *IfException, ArrayRef<Value *> Args,
3863 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3864
3865 /// Compute the number of operands to allocate.
3866 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3867 // We need one operand for the called function, plus our extra operands and
3868 // the input operand counts provided.
3869 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3870 }
3871
3872protected:
3873 // Note: Instruction needs to be a friend here to call cloneImpl.
3874 friend class Instruction;
3875
3876 InvokeInst *cloneImpl() const;
3877
3878public:
3879 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3880 BasicBlock *IfException, ArrayRef<Value *> Args,
3881 const Twine &NameStr,
3882 Instruction *InsertBefore = nullptr) {
3883 int NumOperands = ComputeNumOperands(Args.size());
3884 return new (NumOperands)
3885 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3886 NumOperands, NameStr, InsertBefore);
3887 }
3888
3889 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3890 BasicBlock *IfException, ArrayRef<Value *> Args,
3891 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3892 const Twine &NameStr = "",
3893 Instruction *InsertBefore = nullptr) {
3894 int NumOperands =
3895 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3896 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3897
3898 return new (NumOperands, DescriptorBytes)
3899 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3900 NameStr, InsertBefore);
3901 }
3902
3903 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3904 BasicBlock *IfException, ArrayRef<Value *> Args,
3905 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3906 int NumOperands = ComputeNumOperands(Args.size());
3907 return new (NumOperands)
3908 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
3909 NumOperands, NameStr, InsertAtEnd);
3910 }
3911
3912 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3913 BasicBlock *IfException, ArrayRef<Value *> Args,
3914 ArrayRef<OperandBundleDef> Bundles,
3915 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3916 int NumOperands =
3917 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3918 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3919
3920 return new (NumOperands, DescriptorBytes)
3921 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3922 NameStr, InsertAtEnd);
3923 }
3924
3925 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3926 BasicBlock *IfException, ArrayRef<Value *> Args,
3927 const Twine &NameStr,
3928 Instruction *InsertBefore = nullptr) {
3929 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3930 IfException, Args, std::nullopt, NameStr, InsertBefore);
3931 }
3932
3933 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3934 BasicBlock *IfException, ArrayRef<Value *> Args,
3935 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
3936 const Twine &NameStr = "",
3937 Instruction *InsertBefore = nullptr) {
3938 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3939 IfException, Args, Bundles, NameStr, InsertBefore);
3940 }
3941
3942 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3943 BasicBlock *IfException, ArrayRef<Value *> Args,
3944 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3945 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3946 IfException, Args, NameStr, InsertAtEnd);
3947 }
3948
3949 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3950 BasicBlock *IfException, ArrayRef<Value *> Args,
3951 ArrayRef<OperandBundleDef> Bundles,
3952 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3953 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3954 IfException, Args, Bundles, NameStr, InsertAtEnd);
3955 }
3956
3957 /// Create a clone of \p II with a different set of operand bundles and
3958 /// insert it before \p InsertPt.
3959 ///
3960 /// The returned invoke instruction is identical to \p II in every way except
3961 /// that the operand bundles for the new instruction are set to the operand
3962 /// bundles in \p Bundles.
3963 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3964 Instruction *InsertPt = nullptr);
3965
3966 // get*Dest - Return the destination basic blocks...
3967 BasicBlock *getNormalDest() const {
3968 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3969 }
3970 BasicBlock *getUnwindDest() const {
3971 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3972 }
3973 void setNormalDest(BasicBlock *B) {
3974 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3975 }
3976 void setUnwindDest(BasicBlock *B) {
3977 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3978 }
3979
3980 /// Get the landingpad instruction from the landing pad
3981 /// block (the unwind destination).
3982 LandingPadInst *getLandingPadInst() const;
3983
3984 BasicBlock *getSuccessor(unsigned i) const {
3985 assert(i < 2 && "Successor # out of range for invoke!");
3986 return i == 0 ? getNormalDest() : getUnwindDest();
3987 }
3988
3989 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3990 assert(i < 2 && "Successor # out of range for invoke!");
3991 if (i == 0)
3992 setNormalDest(NewSucc);
3993 else
3994 setUnwindDest(NewSucc);
3995 }
3996
3997 unsigned getNumSuccessors() const { return 2; }
3998
3999 // Methods for support type inquiry through isa, cast, and dyn_cast:
4000 static bool classof(const Instruction *I) {
4001 return (I->getOpcode() == Instruction::Invoke);
4002 }
4003 static bool classof(const Value *V) {
4004 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4005 }
4006
4007private:
4008 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4009 // method so that subclasses cannot accidentally use it.
4010 template <typename Bitfield>
4011 void setSubclassData(typename Bitfield::Type Value) {
4012 Instruction::setSubclassData<Bitfield>(Value);
4013 }
4014};
4015
4016InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4017 BasicBlock *IfException, ArrayRef<Value *> Args,
4018 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4019 const Twine &NameStr, Instruction *InsertBefore)
4020 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4021 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4022 InsertBefore) {
4023 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4024}
4025
4026InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4027 BasicBlock *IfException, ArrayRef<Value *> Args,
4028 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4029 const Twine &NameStr, BasicBlock *InsertAtEnd)
4030 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4031 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4032 InsertAtEnd) {
4033 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4034}
4035
4036//===----------------------------------------------------------------------===//
4037// CallBrInst Class
4038//===----------------------------------------------------------------------===//
4039
4040/// CallBr instruction, tracking function calls that may not return control but
4041/// instead transfer it to a third location. The SubclassData field is used to
4042/// hold the calling convention of the call.
4043///
4044class CallBrInst : public CallBase {
4045
4046 unsigned NumIndirectDests;
4047
4048 CallBrInst(const CallBrInst &BI);
4049
4050 /// Construct a CallBrInst from a range of arguments.
4051 ///
4053 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4054 ArrayRef<BasicBlock *> IndirectDests,
4055 ArrayRef<Value *> Args,
4056 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4057 const Twine &NameStr, Instruction *InsertBefore);
4058
4059 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4060 ArrayRef<BasicBlock *> IndirectDests,
4061 ArrayRef<Value *> Args,
4062 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4063 const Twine &NameStr, BasicBlock *InsertAtEnd);
4064
4065 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4066 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4067 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4068
4069 /// Compute the number of operands to allocate.
4070 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4071 int NumBundleInputs = 0) {
4072 // We need one operand for the called function, one for the default
4073 // destination, one per indirect destination, and the input operand counts provided.
4074 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4075 }
4076
4077protected:
4078 // Note: Instruction needs to be a friend here to call cloneImpl.
4079 friend class Instruction;
4080
4081 CallBrInst *cloneImpl() const;
4082
4083public:
4084 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4085 BasicBlock *DefaultDest,
4086 ArrayRef<BasicBlock *> IndirectDests,
4087 ArrayRef<Value *> Args, const Twine &NameStr,
4088 Instruction *InsertBefore = nullptr) {
4089 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4090 return new (NumOperands)
4091 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4092 NumOperands, NameStr, InsertBefore);
4093 }
4094
4095 static CallBrInst *
4096 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4097 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4098 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4099 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4100 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4101 CountBundleInputs(Bundles));
4102 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4103
4104 return new (NumOperands, DescriptorBytes)
4105 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4106 NumOperands, NameStr, InsertBefore);
4107 }
4108
4109 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4110 BasicBlock *DefaultDest,
4111 ArrayRef<BasicBlock *> IndirectDests,
4112 ArrayRef<Value *> Args, const Twine &NameStr,
4113 BasicBlock *InsertAtEnd) {
4114 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4115 return new (NumOperands)
4116 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4117 NumOperands, NameStr, InsertAtEnd);
4118 }
4119
4120 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4121 BasicBlock *DefaultDest,
4122 ArrayRef<BasicBlock *> IndirectDests,
4123 ArrayRef<Value *> Args,
4124 ArrayRef<OperandBundleDef> Bundles,
4125 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4126 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4127 CountBundleInputs(Bundles));
4128 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4129
4130 return new (NumOperands, DescriptorBytes)
4131 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4132 NumOperands, NameStr, InsertAtEnd);
4133 }
4134
4135 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4136 ArrayRef<BasicBlock *> IndirectDests,
4137 ArrayRef<Value *> Args, const Twine &NameStr,
4138 Instruction *InsertBefore = nullptr) {
4139 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4140 IndirectDests, Args, NameStr, InsertBefore);
4141 }
4142
4143 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4144 ArrayRef<BasicBlock *> IndirectDests,
4145 ArrayRef<Value *> Args,
4146 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4147 const Twine &NameStr = "",
4148 Instruction *InsertBefore = nullptr) {
4149 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4150 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4151 }
4152
4153 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4154 ArrayRef<BasicBlock *> IndirectDests,
4155 ArrayRef<Value *> Args, const Twine &NameStr,
4156 BasicBlock *InsertAtEnd) {
4157 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4158 IndirectDests, Args, NameStr, InsertAtEnd);
4159 }
4160
4161 static CallBrInst *Create(FunctionCallee Func,
4162 BasicBlock *DefaultDest,
4163 ArrayRef<BasicBlock *> IndirectDests,
4164 ArrayRef<Value *> Args,
4165 ArrayRef<OperandBundleDef> Bundles,
4166 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4167 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4168 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4169 }
4170
4171 /// Create a clone of \p CBI with a different set of operand bundles and
4172 /// insert it before \p InsertPt.
4173 ///
4174 /// The returned callbr instruction is identical to \p CBI in every way
4175 /// except that the operand bundles for the new instruction are set to the
4176 /// operand bundles in \p Bundles.
4177 static CallBrInst *Create(CallBrInst *CBI,
4178 ArrayRef<OperandBundleDef> Bundles,
4179 Instruction *InsertPt = nullptr);
4180
4181 /// Return the number of callbr indirect dest labels.
4182 ///
4183 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4184
4185 /// getIndirectDestLabel - Return the i-th indirect dest label.
4186 ///
4187 Value *getIndirectDestLabel(unsigned i) const {
4188 assert(i < getNumIndirectDests() && "Out of bounds!");
4189 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4190 }
4191
4192 Value *getIndirectDestLabelUse(unsigned i) const {
4193 assert(i < getNumIndirectDests() && "Out of bounds!");
4194 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4195 }
4196
4197 // Return the destination basic blocks...
4198 BasicBlock *getDefaultDest() const {
4199 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4200 }
4201 BasicBlock *getIndirectDest(unsigned i) const {
4202 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4203 }
4204 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4205 SmallVector<BasicBlock *, 16> IndirectDests;
4206 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4207 IndirectDests.push_back(getIndirectDest(i));
4208 return IndirectDests;
4209 }
4210 void setDefaultDest(BasicBlock *B) {
4211 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4212 }
4213 void setIndirectDest(unsigned i, BasicBlock *B) {
4214 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4215 }
4216
4217 BasicBlock *getSuccessor(unsigned i) const {
4218 assert(i < getNumSuccessors() + 1 &&
4219 "Successor # out of range for callbr!");
4220 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4221 }
4222
4223 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4224 assert(i < getNumIndirectDests() + 1 &&
4225 "Successor # out of range for callbr!");
4226 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4227 }
4228
4229 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4230
4231 // Methods for support type inquiry through isa, cast, and dyn_cast:
4232 static bool classof(const Instruction *I) {
4233 return (I->getOpcode() == Instruction::CallBr);
4234 }
4235 static bool classof(const Value *V) {
4236 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4237 }
4238
4239private:
4240 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4241 // method so that subclasses cannot accidentally use it.
4242 template <typename Bitfield>
4243 void setSubclassData(typename Bitfield::Type Value) {
4244 Instruction::setSubclassData<Bitfield>(Value);
4245 }
4246};
4247
4248CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4249 ArrayRef<BasicBlock *> IndirectDests,
4250 ArrayRef<Value *> Args,
4251 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4252 const Twine &NameStr, Instruction *InsertBefore)
4253 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4254 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4255 InsertBefore) {
4256 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4257}
4258
4259CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4260 ArrayRef<BasicBlock *> IndirectDests,
4261 ArrayRef<Value *> Args,
4262 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4263 const Twine &NameStr, BasicBlock *InsertAtEnd)
4264 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4265 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4266 InsertAtEnd) {
4267 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4268}
4269
4270//===----------------------------------------------------------------------===//
4271// ResumeInst Class
4272//===----------------------------------------------------------------------===//
4273
4274//===---------------------------------------------------------------------------
4275/// Resume the propagation of an exception.
4276///
4277class ResumeInst : public Instruction {
4278 ResumeInst(const ResumeInst &RI);
4279
4280 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4281 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4282
4283protected:
4284 // Note: Instruction needs to be a friend here to call cloneImpl.
4285 friend class Instruction;
4286
4287 ResumeInst *cloneImpl() const;
4288
4289public:
4290 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4291 return new(1) ResumeInst(Exn, InsertBefore);
4292 }
4293
4294 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4295 return new(1) ResumeInst(Exn, InsertAtEnd);
4296 }
4297
4298 /// Provide fast operand accessors
4299 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4300
4301 /// Convenience accessor.
4302 Value *getValue() const { return Op<0>(); }
4303
4304 unsigned getNumSuccessors() const { return 0; }
4305
4306 // Methods for support type inquiry through isa, cast, and dyn_cast:
4307 static bool classof(const Instruction *I) {
4308 return I->getOpcode() == Instruction::Resume;
4309 }
4310 static bool classof(const Value *V) {
4311 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4312 }
4313
4314private:
4315 BasicBlock *getSuccessor(unsigned idx) const {
4316 llvm_unreachable("ResumeInst has no successors!");
4317 }
4318
4319 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4320 llvm_unreachable("ResumeInst has no successors!");
4321 }
4322};
4323
4324template <>
4325struct OperandTraits<ResumeInst> :
4326 public FixedNumOperandTraits<ResumeInst, 1> {
4327};
4328
4329DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
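
// Illustrative usage sketch (not part of the original header): re-raising the
// in-flight exception from a cleanup pad. `LP` (a LandingPadInst) and
// `CleanupEndBB` (the block being terminated) are assumed to exist.
//
//   ResumeInst::Create(LP, CleanupEndBB); // 'resume' with the landingpad value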
4330
4331//===----------------------------------------------------------------------===//
4332// CatchSwitchInst Class
4333//===----------------------------------------------------------------------===//
4334class CatchSwitchInst : public Instruction {
4335 using UnwindDestField = BoolBitfieldElementT<0>;
4336
4337 /// The number of operands actually allocated. NumOperands is
4338 /// the number actually in use.
4339 unsigned ReservedSpace;
4340
4341 // Operand[0] = Outer scope
4342 // Operand[1] = Unwind block destination
4343 // Operand[n] = BasicBlock to go to on match
4344 CatchSwitchInst(const CatchSwitchInst &CSI);
4345
4346 /// Create a new switch instruction, specifying a
4347 /// default destination. The number of additional handlers can be specified
4348 /// here to make memory allocation more efficient.
4349 /// This constructor can also autoinsert before another instruction.
4350 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4351 unsigned NumHandlers, const Twine &NameStr,
4352 Instruction *InsertBefore);
4353
4354 /// Create a new switch instruction, specifying a
4355 /// default destination. The number of additional handlers can be specified
4356 /// here to make memory allocation more efficient.
4357 /// This constructor also autoinserts at the end of the specified BasicBlock.
4358 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4359 unsigned NumHandlers, const Twine &NameStr,
4360 BasicBlock *InsertAtEnd);
4361
4362 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }
4364
4365 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4366 void growOperands(unsigned Size);
4367
4368protected:
4369 // Note: Instruction needs to be a friend here to call cloneImpl.
4370 friend class Instruction;
4371
4372 CatchSwitchInst *cloneImpl() const;
4373
4374public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4376
4377 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4378 unsigned NumHandlers,
4379 const Twine &NameStr = "",
4380 Instruction *InsertBefore = nullptr) {
4381 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4382 InsertBefore);
4383 }
4384
4385 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4386 unsigned NumHandlers, const Twine &NameStr,
4387 BasicBlock *InsertAtEnd) {
4388 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4389 InsertAtEnd);
4390 }
4391
4392 /// Provide fast operand accessors
4393 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4394
4395 // Accessor Methods for CatchSwitch stmt
4396 Value *getParentPad() const { return getOperand(0); }
4397 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4398
4399 // Accessor Methods for CatchSwitch stmt
4400 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4401 bool unwindsToCaller() const { return !hasUnwindDest(); }
4402 BasicBlock *getUnwindDest() const {
4403 if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
4405 return nullptr;
4406 }
4407 void setUnwindDest(BasicBlock *UnwindDest) {
4408 assert(UnwindDest);
4409 assert(hasUnwindDest());
4410 setOperand(1, UnwindDest);
4411 }
4412
  /// Return the number of 'handlers' in this catchswitch instruction,
  /// not counting the optional unwind destination.
4415 unsigned getNumHandlers() const {
4416 if (hasUnwindDest())
4417 return getNumOperands() - 2;
4418 return getNumOperands() - 1;
4419 }
4420
4421private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
4425 }
4426
4427public:
4428 using DerefFnTy = BasicBlock *(*)(Value *);
4429 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4430 using handler_range = iterator_range<handler_iterator>;
4431 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4432 using const_handler_iterator =
4433 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4434 using const_handler_range = iterator_range<const_handler_iterator>;
4435
4436 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4437 handler_iterator handler_begin() {
4438 op_iterator It = op_begin() + 1;
4439 if (hasUnwindDest())
4440 ++It;
4441 return handler_iterator(It, DerefFnTy(handler_helper));
4442 }
4443
  /// Returns a read-only iterator that points to the first handler in the
  /// CatchSwitchInst.
4446 const_handler_iterator handler_begin() const {
4447 const_op_iterator It = op_begin() + 1;
4448 if (hasUnwindDest())
4449 ++It;
4450 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4451 }
4452
  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
4455 handler_iterator handler_end() {
4456 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4457 }
4458
  /// Returns a read-only iterator that points one past the last handler in
  /// the CatchSwitchInst.
4461 const_handler_iterator handler_end() const {
4462 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4463 }
4464
4465 /// iteration adapter for range-for loops.
4466 handler_range handlers() {
    return make_range(handler_begin(), handler_end());
4468 }
4469
4470 /// iteration adapter for range-for loops.
4471 const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
4473 }
4474
  /// Add a handler block to this catchswitch instruction.
  /// Note:
  /// This action invalidates handler_end(). The old handler_end() iterator
  /// will point to the added handler.
4479 void addHandler(BasicBlock *Dest);
4480
4481 void removeHandler(handler_iterator HI);
4482
4483 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4484 BasicBlock *getSuccessor(unsigned Idx) const {
4485 assert(Idx < getNumSuccessors() &&
4486 "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
4488 }
4489 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4490 assert(Idx < getNumSuccessors() &&
4491 "Successor # out of range for catchswitch!");
4492 setOperand(Idx + 1, NewSucc);
4493 }
4494
4495 // Methods for support type inquiry through isa, cast, and dyn_cast:
4496 static bool classof(const Instruction *I) {
4497 return I->getOpcode() == Instruction::CatchSwitch;
4498 }
4499 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4501 }
4502};
4503
4504template <>
4505struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4506
4507DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
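
// Example (illustrative sketch): building a catchswitch that dispatches to two
// handler blocks. Assumes `ParentPad` is the enclosing pad token (or a
// ConstantTokenNone for a function-level funclet) and `Dispatch`, `Handler1`,
// `Handler2` are existing basic blocks:
//
//   CatchSwitchInst *CS = CatchSwitchInst::Create(
//       ParentPad, /*UnwindDest=*/nullptr, /*NumHandlers=*/2, "dispatch",
//       /*InsertAtEnd=*/Dispatch);
//   CS->addHandler(Handler1);
//   CS->addHandler(Handler2);
//   for (BasicBlock *Handler : CS->handlers())
//     (void)Handler; // each handler block is expected to start with a catchpad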
4508
4509//===----------------------------------------------------------------------===//
4510// CleanupPadInst Class
4511//===----------------------------------------------------------------------===//
4512class CleanupPadInst : public FuncletPadInst {
4513private:
4514 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4515 unsigned Values, const Twine &NameStr,
4516 Instruction *InsertBefore)
4517 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4518 NameStr, InsertBefore) {}
4519 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4520 unsigned Values, const Twine &NameStr,
4521 BasicBlock *InsertAtEnd)
4522 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4523 NameStr, InsertAtEnd) {}
4524
4525public:
4526 static CleanupPadInst *Create(Value *ParentPad,
4527 ArrayRef<Value *> Args = std::nullopt,
4528 const Twine &NameStr = "",
4529 Instruction *InsertBefore = nullptr) {
4530 unsigned Values = 1 + Args.size();
4531 return new (Values)
4532 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4533 }
4534
4535 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4536 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4537 unsigned Values = 1 + Args.size();
4538 return new (Values)
4539 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4540 }
4541
4542 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4543 static bool classof(const Instruction *I) {
4544 return I->getOpcode() == Instruction::CleanupPad;
4545 }
4546 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4548 }
4549};
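
// Example (illustrative sketch): starting a cleanup funclet at the top of an
// empty block `CleanupBB`. Assumes `Ctx` is the LLVMContext; ConstantTokenNone
// (declared in Constants.h) is the conventional parent pad of a function-level
// funclet:
//
//   CleanupPadInst *CP = CleanupPadInst::Create(
//       ConstantTokenNone::get(Ctx), /*Args=*/{}, "cleanup",
//       /*InsertAtEnd=*/CleanupBB);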
4550
4551//===----------------------------------------------------------------------===//
4552// CatchPadInst Class
4553//===----------------------------------------------------------------------===//
4554class CatchPadInst : public FuncletPadInst {
4555private:
4556 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4557 unsigned Values, const Twine &NameStr,
4558 Instruction *InsertBefore)
4559 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4560 NameStr, InsertBefore) {}
4561 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4562 unsigned Values, const Twine &NameStr,
4563 BasicBlock *InsertAtEnd)
4564 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4565 NameStr, InsertAtEnd) {}
4566
4567public:
4568 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4569 const Twine &NameStr = "",
4570 Instruction *InsertBefore = nullptr) {
4571 unsigned Values = 1 + Args.size();
4572 return new (Values)
4573 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4574 }
4575
4576 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4577 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4578 unsigned Values = 1 + Args.size();
4579 return new (Values)
4580 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4581 }
4582
4583 /// Convenience accessors
4584 CatchSwitchInst *getCatchSwitch() const {
    return cast<CatchSwitchInst>(Op<-1>());
4586 }
4587 void setCatchSwitch(Value *CatchSwitch) {
4588 assert(CatchSwitch);
4589 Op<-1>() = CatchSwitch;
4590 }
4591
4592 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4593 static bool classof(const Instruction *I) {
4594 return I->getOpcode() == Instruction::CatchPad;
4595 }
4596 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4598 }
4599};
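
// Example (illustrative sketch): attaching a catchpad to an existing
// catchswitch `CS` at the top of handler block `HandlerBB`. The Args are
// personality-specific (for example typeinfo, flags and an exception slot for
// MSVC C++ EH); the empty list here is purely for illustration:
//
//   CatchPadInst *CPI =
//       CatchPadInst::Create(CS, /*Args=*/{}, "catch", /*InsertAtEnd=*/HandlerBB);
//   CatchSwitchInst *Owner = CPI->getCatchSwitch(); // == CS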
4600
4601//===----------------------------------------------------------------------===//
4602// CatchReturnInst Class
4603//===----------------------------------------------------------------------===//
4604
4605class CatchReturnInst : public Instruction {
4606 CatchReturnInst(const CatchReturnInst &RI);
4607 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4608 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4609
4610 void init(Value *CatchPad, BasicBlock *BB);
4611
4612protected:
4613 // Note: Instruction needs to be a friend here to call cloneImpl.
4614 friend class Instruction;
4615
4616 CatchReturnInst *cloneImpl() const;
4617
4618public:
4619 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4620 Instruction *InsertBefore = nullptr) {
4621 assert(CatchPad);
4622 assert(BB);
4623 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4624 }
4625
4626 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4627 BasicBlock *InsertAtEnd) {
4628 assert(CatchPad);
4629 assert(BB);
4630 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4631 }
4632
4633 /// Provide fast operand accessors
4634 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4635
4636 /// Convenience accessors.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4638 void setCatchPad(CatchPadInst *CatchPad) {
4639 assert(CatchPad);
4640 Op<0>() = CatchPad;
4641 }
4642
  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4644 void setSuccessor(BasicBlock *NewSucc) {
4645 assert(NewSucc);
4646 Op<1>() = NewSucc;
4647 }
4648 unsigned getNumSuccessors() const { return 1; }
4649
4650 /// Get the parentPad of this catchret's catchpad's catchswitch.
4651 /// The successor block is implicitly a member of this funclet.
4652 Value *getCatchSwitchParentPad() const {
4653 return getCatchPad()->getCatchSwitch()->getParentPad();
4654 }
4655
4656 // Methods for support type inquiry through isa, cast, and dyn_cast:
4657 static bool classof(const Instruction *I) {
4658 return (I->getOpcode() == Instruction::CatchRet);
4659 }
4660 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4662 }
4663
4664private:
4665 BasicBlock *getSuccessor(unsigned Idx) const {
4666 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4667 return getSuccessor();
4668 }
4669
4670 void setSuccessor(unsigned Idx, BasicBlock *B) {
4671 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4672 setSuccessor(B);
4673 }
4674};
4675
4676template <>
4677struct OperandTraits<CatchReturnInst>
4678 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4679
4680DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
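
// Example (illustrative sketch): leaving a catch handler. Assumes `CPI` is the
// handler's catchpad, `ContBB` is the block where normal control flow
// continues, and `HandlerBB` is the block being terminated:
//
//   CatchReturnInst *CRI = CatchReturnInst::Create(CPI, ContBB, HandlerBB);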
4681
4682//===----------------------------------------------------------------------===//
4683// CleanupReturnInst Class
4684//===----------------------------------------------------------------------===//
4685
4686class CleanupReturnInst : public Instruction {
4687 using UnwindDestField = BoolBitfieldElementT<0>;
4688
4689private:
4690 CleanupReturnInst(const CleanupReturnInst &RI);
4691 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4692 Instruction *InsertBefore = nullptr);
4693 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4694 BasicBlock *InsertAtEnd);
4695
4696 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4697
4698protected:
4699 // Note: Instruction needs to be a friend here to call cloneImpl.
4700 friend class Instruction;
4701
4702 CleanupReturnInst *cloneImpl() const;
4703
4704public:
4705 static CleanupReturnInst *Create(Value *CleanupPad,
4706 BasicBlock *UnwindBB = nullptr,
4707 Instruction *InsertBefore = nullptr) {
4708 assert(CleanupPad);
4709 unsigned Values = 1;
4710 if (UnwindBB)
4711 ++Values;
4712 return new (Values)
4713 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4714 }
4715
4716 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4717 BasicBlock *InsertAtEnd) {
4718 assert(CleanupPad);
4719 unsigned Values = 1;
4720 if (UnwindBB)
4721 ++Values;
4722 return new (Values)
4723 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4724 }
4725
4726 /// Provide fast operand accessors
4727 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4728
4729 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4730 bool unwindsToCaller() const { return !hasUnwindDest(); }
4731
4732 /// Convenience accessor.
4733 CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
4735 }
4736 void setCleanupPad(CleanupPadInst *CleanupPad) {
4737 assert(CleanupPad);
4738 Op<0>() = CleanupPad;
4739 }
4740
4741 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4742
4743 BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4745 }
4746 void setUnwindDest(BasicBlock *NewDest) {
4747 assert(NewDest);
4748 assert(hasUnwindDest());
4749 Op<1>() = NewDest;
4750 }
4751
4752 // Methods for support type inquiry through isa, cast, and dyn_cast:
4753 static bool classof(const Instruction *I) {
4754 return (I->getOpcode() == Instruction::CleanupRet);
4755 }
4756 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4758 }
4759
4760private:
4761 BasicBlock *getSuccessor(unsigned Idx) const {
4762 assert(Idx == 0);
4763 return getUnwindDest();
4764 }
4765
4766 void setSuccessor(unsigned Idx, BasicBlock *B) {
4767 assert(Idx == 0);
4768 setUnwindDest(B);
4769 }
4770
4771 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4772 // method so that subclasses cannot accidentally use it.
4773 template <typename Bitfield>
4774 void setSubclassData(typename Bitfield::Type Value) {
4775 Instruction::setSubclassData<Bitfield>(Value);
4776 }
4777};
4778
4779template <>
4780struct OperandTraits<CleanupReturnInst>
4781 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4782
4783DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
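
// Example (illustrative sketch): ending a cleanup funclet. Assumes `CP` is the
// block's cleanuppad and `CleanupBB` is the block being terminated; passing a
// non-null unwind destination instead of nullptr would make the cleanupret
// unwind there rather than to the caller:
//
//   CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr, /*InsertAtEnd=*/CleanupBB);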
4784
4785//===----------------------------------------------------------------------===//
4786// UnreachableInst Class
4787//===----------------------------------------------------------------------===//
4788
4789//===---------------------------------------------------------------------------
/// This instruction has undefined behavior. In particular, its
/// presence indicates some higher-level knowledge that the end of the
/// block cannot be reached.
4793///
4794class UnreachableInst : public Instruction {
4795protected:
4796 // Note: Instruction needs to be a friend here to call cloneImpl.
4797 friend class Instruction;
4798
4799 UnreachableInst *cloneImpl() const;
4800
4801public:
4802 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4803 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4804
4805 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
4808
4809 unsigned getNumSuccessors() const { return 0; }
4810
4811 // Methods for support type inquiry through isa, cast, and dyn_cast:
4812 static bool classof(const Instruction *I) {
4813 return I->getOpcode() == Instruction::Unreachable;
4814 }
4815 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4817 }
4818
4819private:
4820 BasicBlock *getSuccessor(unsigned idx) const {
4821 llvm_unreachable("UnreachableInst has no successors!");
4822 }
4823
4824 void setSuccessor(unsigned idx, BasicBlock *B) {
4825 llvm_unreachable("UnreachableInst has no successors!");
4826 }
4827};
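
// Example (illustrative sketch): terminating a block after a call that never
// returns. Assumes `Ctx` is the LLVMContext and `BB` is the block to
// terminate:
//
//   new UnreachableInst(Ctx, /*InsertAtEnd=*/BB);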
4828
4829//===----------------------------------------------------------------------===//
4830// TruncInst Class
4831//===----------------------------------------------------------------------===//
4832
4833/// This class represents a truncation of integer types.
4834class TruncInst : public CastInst {
4835protected:
4836 // Note: Instruction needs to be a friend here to call cloneImpl.
4837 friend class Instruction;
4838
4839 /// Clone an identical TruncInst
4840 TruncInst *cloneImpl() const;
4841
4842public:
4843 /// Constructor with insert-before-instruction semantics
4844 TruncInst(
4845 Value *S, ///< The value to be truncated
4846 Type *Ty, ///< The (smaller) type to truncate to
4847 const Twine &NameStr = "", ///< A name for the new instruction
4848 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4849 );
4850
4851 /// Constructor with insert-at-end-of-block semantics
4852 TruncInst(
4853 Value *S, ///< The value to be truncated
4854 Type *Ty, ///< The (smaller) type to truncate to
4855 const Twine &NameStr, ///< A name for the new instruction
4856 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4857 );
4858
4859 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4860 static bool classof(const Instruction *I) {
4861 return I->getOpcode() == Trunc;
4862 }
4863 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4865 }
4866};
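
// Example (illustrative sketch): truncating a 32-bit value to 8 bits. Assumes
// `V` is an i32 Value, `Ctx` its LLVMContext, and `InsertPt` the instruction
// to insert before; the sibling cast classes below follow the same constructor
// pattern:
//
//   TruncInst *Lo = new TruncInst(V, Type::getInt8Ty(Ctx), "lo8", InsertPt);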
4867
4868//===----------------------------------------------------------------------===//
4869// ZExtInst Class
4870//===----------------------------------------------------------------------===//
4871
4872/// This class represents zero extension of integer types.
4873class ZExtInst : public CastInst {
4874protected:
4875 // Note: Instruction needs to be a friend here to call cloneImpl.
4876 friend class Instruction;
4877
4878 /// Clone an identical ZExtInst
4879 ZExtInst *cloneImpl() const;
4880
4881public:
4882 /// Constructor with insert-before-instruction semantics
4883 ZExtInst(
4884 Value *S, ///< The value to be zero extended
4885 Type *Ty, ///< The type to zero extend to
4886 const Twine &NameStr = "", ///< A name for the new instruction
4887 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4888 );
4889
  /// Constructor with insert-at-end-of-block semantics
4891 ZExtInst(
4892 Value *S, ///< The value to be zero extended
4893 Type *Ty, ///< The type to zero extend to
4894 const Twine &NameStr, ///< A name for the new instruction
4895 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4896 );
4897
4898 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4899 static bool classof(const Instruction *I) {
4900 return I->getOpcode() == ZExt;
4901 }
4902 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4904 }
4905};
4906
4907//===----------------------------------------------------------------------===//
4908// SExtInst Class
4909//===----------------------------------------------------------------------===//
4910
4911/// This class represents a sign extension of integer types.
4912class SExtInst : public CastInst {
4913protected:
4914 // Note: Instruction needs to be a friend here to call cloneImpl.
4915 friend class Instruction;
4916
4917 /// Clone an identical SExtInst
4918 SExtInst *cloneImpl() const;
4919
4920public:
4921 /// Constructor with insert-before-instruction semantics
4922 SExtInst(
4923 Value *S, ///< The value to be sign extended
4924 Type *Ty, ///< The type to sign extend to
4925 const Twine &NameStr = "", ///< A name for the new instruction
4926 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4927 );
4928
4929 /// Constructor with insert-at-end-of-block semantics
4930 SExtInst(
4931 Value *S, ///< The value to be sign extended
4932 Type *Ty, ///< The type to sign extend to
4933 const Twine &NameStr, ///< A name for the new instruction
4934 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4935 );
4936
4937 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4938 static bool classof(const Instruction *I) {
4939 return I->getOpcode() == SExt;
4940 }
4941 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4943 }
4944};
4945
4946//===----------------------------------------------------------------------===//
4947// FPTruncInst Class
4948//===----------------------------------------------------------------------===//
4949
4950/// This class represents a truncation of floating point types.
4951class FPTruncInst : public CastInst {
4952protected:
4953 // Note: Instruction needs to be a friend here to call cloneImpl.
4954 friend class Instruction;
4955
4956 /// Clone an identical FPTruncInst
4957 FPTruncInst *cloneImpl() const;
4958
4959public:
4960 /// Constructor with insert-before-instruction semantics
4961 FPTruncInst(
4962 Value *S, ///< The value to be truncated
4963 Type *Ty, ///< The type to truncate to
4964 const Twine &NameStr = "", ///< A name for the new instruction
4965 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4966 );
4967
  /// Constructor with insert-at-end-of-block semantics
4969 FPTruncInst(
4970 Value *S, ///< The value to be truncated
4971 Type *Ty, ///< The type to truncate to
4972 const Twine &NameStr, ///< A name for the new instruction
4973 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4974 );
4975
4976 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4977 static bool classof(const Instruction *I) {
4978 return I->getOpcode() == FPTrunc;
4979 }
4980 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4982 }
4983};
4984
4985//===----------------------------------------------------------------------===//
4986// FPExtInst Class
4987//===----------------------------------------------------------------------===//
4988
4989/// This class represents an extension of floating point types.
4990class FPExtInst : public CastInst {
4991protected:
4992 // Note: Instruction needs to be a friend here to call cloneImpl.
4993 friend class Instruction;
4994
4995 /// Clone an identical FPExtInst
4996 FPExtInst *cloneImpl() const;
4997
4998public:
4999 /// Constructor with insert-before-instruction semantics
5000 FPExtInst(
5001 Value *S, ///< The value to be extended
5002 Type *Ty, ///< The type to extend to
5003 const Twine &NameStr = "", ///< A name for the new instruction
5004 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5005 );
5006
5007 /// Constructor with insert-at-end-of-block semantics
5008 FPExtInst(
5009 Value *S, ///< The value to be extended
5010 Type *Ty, ///< The type to extend to
5011 const Twine &NameStr, ///< A name for the new instruction
5012 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5013 );
5014
5015 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5016 static bool classof(const Instruction *I) {
5017 return I->getOpcode() == FPExt;
5018 }
5019 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5021 }
5022};
5023
5024//===----------------------------------------------------------------------===//
5025// UIToFPInst Class
5026//===----------------------------------------------------------------------===//
5027
/// This class represents a cast from unsigned integer to floating point.
5029class UIToFPInst : public CastInst {
5030protected:
5031 // Note: Instruction needs to be a friend here to call cloneImpl.
5032 friend class Instruction;
5033
5034 /// Clone an identical UIToFPInst
5035 UIToFPInst *cloneImpl() const;
5036
5037public:
5038 /// Constructor with insert-before-instruction semantics
5039 UIToFPInst(
5040 Value *S, ///< The value to be converted
5041 Type *Ty, ///< The type to convert to
5042 const Twine &NameStr = "", ///< A name for the new instruction
5043 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5044 );
5045
5046 /// Constructor with insert-at-end-of-block semantics
5047 UIToFPInst(
5048 Value *S, ///< The value to be converted
5049 Type *Ty, ///< The type to convert to
5050 const Twine &NameStr, ///< A name for the new instruction
5051 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5052 );
5053
5054 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5055 static bool classof(const Instruction *I) {
5056 return I->getOpcode() == UIToFP;
5057 }
5058 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5060 }
5061};
5062
5063//===----------------------------------------------------------------------===//
5064// SIToFPInst Class
5065//===----------------------------------------------------------------------===//
5066
5067/// This class represents a cast from signed integer to floating point.
5068class SIToFPInst : public CastInst {
5069protected:
5070 // Note: Instruction needs to be a friend here to call cloneImpl.
5071 friend class Instruction;
5072
5073 /// Clone an identical SIToFPInst
5074 SIToFPInst *cloneImpl() const;
5075
5076public:
5077 /// Constructor with insert-before-instruction semantics
5078 SIToFPInst(
5079 Value *S, ///< The value to be converted
5080 Type *Ty, ///< The type to convert to
5081 const Twine &NameStr = "", ///< A name for the new instruction
5082 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5083 );
5084
5085 /// Constructor with insert-at-end-of-block semantics
5086 SIToFPInst(
5087 Value *S, ///< The value to be converted
5088 Type *Ty, ///< The type to convert to
5089 const Twine &NameStr, ///< A name for the new instruction
5090 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5091 );
5092
5093 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5094 static bool classof(const Instruction *I) {
5095 return I->getOpcode() == SIToFP;
5096 }
5097 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5099 }
5100};
5101
5102//===----------------------------------------------------------------------===//
5103// FPToUIInst Class
5104//===----------------------------------------------------------------------===//
5105
/// This class represents a cast from floating point to unsigned integer.
5107class FPToUIInst : public CastInst {
5108protected:
5109 // Note: Instruction needs to be a friend here to call cloneImpl.
5110 friend class Instruction;
5111
5112 /// Clone an identical FPToUIInst
5113 FPToUIInst *cloneImpl() const;
5114
5115public:
5116 /// Constructor with insert-before-instruction semantics
5117 FPToUIInst(
5118 Value *S, ///< The value to be converted
5119 Type *Ty, ///< The type to convert to
5120 const Twine &NameStr = "", ///< A name for the new instruction
5121 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5122 );
5123
5124 /// Constructor with insert-at-end-of-block semantics
5125 FPToUIInst(
5126 Value *S, ///< The value to be converted
5127 Type *Ty, ///< The type to convert to
5128 const Twine &NameStr, ///< A name for the new instruction
5129 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5130 );
5131
5132 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5133 static bool classof(const Instruction *I) {
5134 return I->getOpcode() == FPToUI;
5135 }
5136 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5138 }
5139};
5140
5141//===----------------------------------------------------------------------===//
5142// FPToSIInst Class
5143//===----------------------------------------------------------------------===//
5144
5145/// This class represents a cast from floating point to signed integer.
5146class FPToSIInst : public CastInst {
5147protected:
5148 // Note: Instruction needs to be a friend here to call cloneImpl.
5149 friend class Instruction;
5150
5151 /// Clone an identical FPToSIInst
5152 FPToSIInst *cloneImpl() const;
5153
5154public:
5155 /// Constructor with insert-before-instruction semantics
5156 FPToSIInst(
5157 Value *S, ///< The value to be converted
5158 Type *Ty, ///< The type to convert to
5159 const Twine &NameStr = "", ///< A name for the new instruction
5160 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5161 );
5162
5163 /// Constructor with insert-at-end-of-block semantics
5164 FPToSIInst(
5165 Value *S, ///< The value to be converted
5166 Type *Ty, ///< The type to convert to
5167 const Twine &NameStr, ///< A name for the new instruction
5168 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5169 );
5170
5171 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5172 static bool classof(const Instruction *I) {
5173 return I->getOpcode() == FPToSI;
5174 }
5175 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5177 }
5178};
5179
5180//===----------------------------------------------------------------------===//
5181// IntToPtrInst Class
5182//===----------------------------------------------------------------------===//
5183
5184/// This class represents a cast from an integer to a pointer.
5185class IntToPtrInst : public CastInst {
5186public:
5187 // Note: Instruction needs to be a friend here to call cloneImpl.
5188 friend class Instruction;
5189
5190 /// Constructor with insert-before-instruction semantics
5191 IntToPtrInst(
5192 Value *S, ///< The value to be converted
5193 Type *Ty, ///< The type to convert to
5194 const Twine &NameStr = "", ///< A name for the new instruction
5195 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5196 );
5197
5198 /// Constructor with insert-at-end-of-block semantics
5199 IntToPtrInst(
5200 Value *S, ///< The value to be converted
5201 Type *Ty, ///< The type to convert to
5202 const Twine &NameStr, ///< A name for the new instruction
5203 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5204 );
5205
5206 /// Clone an identical IntToPtrInst.
5207 IntToPtrInst *cloneImpl() const;
5208
5209 /// Returns the address space of this instruction's pointer type.
5210 unsigned getAddressSpace() const {
5211 return getType()->getPointerAddressSpace();
5212 }
5213
5214 // Methods for support type inquiry through isa, cast, and dyn_cast:
5215 static bool classof(const Instruction *I) {
5216 return I->getOpcode() == IntToPtr;
5217 }
5218 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5220 }
5221};
5222
5223//===----------------------------------------------------------------------===//
5224// PtrToIntInst Class
5225//===----------------------------------------------------------------------===//
5226
5227/// This class represents a cast from a pointer to an integer.
5228class PtrToIntInst : public CastInst {
5229protected:
5230 // Note: Instruction needs to be a friend here to call cloneImpl.
5231 friend class Instruction;
5232
5233 /// Clone an identical PtrToIntInst.
5234 PtrToIntInst *cloneImpl() const;
5235
5236public:
5237 /// Constructor with insert-before-instruction semantics
5238 PtrToIntInst(
5239 Value *S, ///< The value to be converted
5240 Type *Ty, ///< The type to convert to
5241 const Twine &NameStr = "", ///< A name for the new instruction
5242 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5243 );
5244
5245 /// Constructor with insert-at-end-of-block semantics
5246 PtrToIntInst(
5247 Value *S, ///< The value to be converted
5248 Type *Ty, ///< The type to convert to
5249 const Twine &NameStr, ///< A name for the new instruction
5250 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5251 );
5252
5253 /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
5257 /// Gets the operand index of the pointer operand.
5258 static unsigned getPointerOperandIndex() { return 0U; }
5259
5260 /// Returns the address space of the pointer operand.
5261 unsigned getPointerAddressSpace() const {
5262 return getPointerOperand()->getType()->getPointerAddressSpace();
5263 }
5264
5265 // Methods for support type inquiry through isa, cast, and dyn_cast:
5266 static bool classof(const Instruction *I) {
5267 return I->getOpcode() == PtrToInt;
5268 }
5269 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5271 }
5272};
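
// Example (illustrative sketch): converting a pointer to an integer wide
// enough to hold it. Assumes `Ptr` is a pointer-typed Value, `DL` a DataLayout
// and `InsertPt` the insertion point:
//
//   Type *IntPtrTy = DL.getIntPtrType(Ptr->getType());
//   PtrToIntInst *PI = new PtrToIntInst(Ptr, IntPtrTy, "addr", InsertPt);
//   unsigned AS = PI->getPointerAddressSpace(); // address space of `Ptr`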
5273
5274//===----------------------------------------------------------------------===//
5275// BitCastInst Class
5276//===----------------------------------------------------------------------===//
5277
5278/// This class represents a no-op cast from one type to another.
5279class BitCastInst : public CastInst {
5280protected:
5281 // Note: Instruction needs to be a friend here to call cloneImpl.
5282 friend class Instruction;
5283
5284 /// Clone an identical BitCastInst.
5285 BitCastInst *cloneImpl() const;
5286
5287public:
5288 /// Constructor with insert-before-instruction semantics
5289 BitCastInst(
5290 Value *S, ///< The value to be casted
    Type *Ty, ///< The type to cast to
5292 const Twine &NameStr = "", ///< A name for the new instruction
5293 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5294 );
5295
5296 /// Constructor with insert-at-end-of-block semantics
5297 BitCastInst(
5298 Value *S, ///< The value to be casted
    Type *Ty, ///< The type to cast to
5300 const Twine &NameStr, ///< A name for the new instruction
5301 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5302 );
5303
5304 // Methods for support type inquiry through isa, cast, and dyn_cast:
5305 static bool classof(const Instruction *I) {
5306 return I->getOpcode() == BitCast;
5307 }
5308 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5310 }
5311};
5312
5313//===----------------------------------------------------------------------===//
5314// AddrSpaceCastInst Class
5315//===----------------------------------------------------------------------===//
5316
5317/// This class represents a conversion between pointers from one address space
5318/// to another.
5319class AddrSpaceCastInst : public CastInst {
5320protected:
5321 // Note: Instruction needs to be a friend here to call cloneImpl.
5322 friend class Instruction;
5323
5324 /// Clone an identical AddrSpaceCastInst.
5325 AddrSpaceCastInst *cloneImpl() const;
5326
5327public:
5328 /// Constructor with insert-before-instruction semantics
5329 AddrSpaceCastInst(
5330 Value *S, ///< The value to be casted
    Type *Ty, ///< The type to cast to
5332 const Twine &NameStr = "", ///< A name for the new instruction
5333 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5334 );
5335
5336 /// Constructor with insert-at-end-of-block semantics
5337 AddrSpaceCastInst(
5338 Value *S, ///< The value to be casted
    Type *Ty, ///< The type to cast to
5340 const Twine &NameStr, ///< A name for the new instruction
5341 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5342 );
5343
5344 // Methods for support type inquiry through isa, cast, and dyn_cast:
5345 static bool classof(const Instruction *I) {
5346 return I->getOpcode() == AddrSpaceCast;
5347 }
5348 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5350 }
5351
5352 /// Gets the pointer operand.
5353 Value *getPointerOperand() {
    return getOperand(0);
5355 }
5356
5357 /// Gets the pointer operand.
5358 const Value *getPointerOperand() const {
    return getOperand(0);
5360 }
5361
5362 /// Gets the operand index of the pointer operand.
5363 static unsigned getPointerOperandIndex() {
5364 return 0U;
5365 }
5366
5367 /// Returns the address space of the pointer operand.
5368 unsigned getSrcAddressSpace() const {
5369 return getPointerOperand()->getType()->getPointerAddressSpace();
5370 }
5371
5372 /// Returns the address space of the result.
5373 unsigned getDestAddressSpace() const {
5374 return getType()->getPointerAddressSpace();
5375 }
5376};
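
// Example (illustrative sketch): casting a pointer between address spaces.
// Assumes `Ptr` points into address space 3, `FlatPtrTy` is a pointer type in
// the destination address space (for example PointerType::get(Ctx, 0)), and
// `InsertPt` is the insertion point:
//
//   auto *ASC = new AddrSpaceCastInst(Ptr, FlatPtrTy, "flat", InsertPt);
//   unsigned Src = ASC->getSrcAddressSpace();  // 3
//   unsigned Dst = ASC->getDestAddressSpace(); // 0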
5377
5378//===----------------------------------------------------------------------===//
5379// Helper functions
5380//===----------------------------------------------------------------------===//
5381
5382/// A helper function that returns the pointer operand of a load or store
5383/// instruction. Returns nullptr if not load or store.
5384inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
5394}
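
// Example (illustrative sketch): uniformly retrieving the address a memory
// instruction accesses. Assumes `V` is an arbitrary Value*:
//
//   if (Value *Addr = getLoadStorePointerOperand(V)) {
//     // `V` is a load or store and `Addr` is the pointer it accesses.
//   }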
5395
5396/// A helper function that returns the pointer operand of a load, store
5397/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5398inline const Value *getPointerOperand(const Value *V) {
5399 if (auto *Ptr = getLoadStorePointerOperand(V))
5400 return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5402 return Gep->getPointerOperand();
5403 return nullptr;
5404}
5405inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5407}
5408
5409/// A helper function that returns the alignment of load or store instruction.
5410inline Align getLoadStoreAlignment(Value *I) {
5411 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5412 "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
5416}
5417
5418/// A helper function that returns the address space of the pointer operand of
5419/// load or store instruction.
5420inline unsigned getLoadStoreAddressSpace(Value *I) {
5421 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5422 "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
5426}
5427
5428/// A helper function that returns the type of a load or store instruction.
5429inline Type *getLoadStoreType(Value *I) {
5430 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5431 "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
5435}
5436
5437/// A helper function that returns an atomic operation's sync scope; returns
5438/// std::nullopt if it is not an atomic operation.
5439inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5440 if (!I->isAtomic())
5441 return std::nullopt;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
5452 llvm_unreachable("unhandled atomic operation");
5453}
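
// Example (illustrative sketch): checking whether an atomic operation is
// limited to a single thread of execution. Assumes `I` is an arbitrary
// Instruction*:
//
//   if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I)) {
//     bool IsSingleThread = *SSID == SyncScope::SingleThread;
//     (void)IsSingleThread;
//   }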
5454
5455//===----------------------------------------------------------------------===//
5456// FreezeInst Class
5457//===----------------------------------------------------------------------===//
5458
/// This class represents a freeze instruction, which returns an arbitrary
/// but fixed concrete value if its operand is a poison or undef value.
5461class FreezeInst : public UnaryInstruction {
5462protected:
5463 // Note: Instruction needs to be a friend here to call cloneImpl.
5464 friend class Instruction;
5465
5466 /// Clone an identical FreezeInst
5467 FreezeInst *cloneImpl() const;
5468
5469public:
5470 explicit FreezeInst(Value *S,
5471 const Twine &NameStr = "",
5472 Instruction *InsertBefore = nullptr);
5473 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5474
5475 // Methods for support type inquiry through isa, cast, and dyn_cast:
5476 static inline bool classof(const Instruction *I) {
5477 return I->getOpcode() == Freeze;
5478 }
5479 static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5481 }
5482};
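
// Example (illustrative sketch): stopping poison/undef from propagating out of
// a condition before branching on it. Assumes `Cond` is a possibly-poison i1
// Value and `InsertPt` is the insertion point:
//
//   FreezeInst *FrozenCond = new FreezeInst(Cond, "frozen", InsertPt);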
5483
5484} // end namespace llvm
5485
5486#endif // LLVM_IR_INSTRUCTIONS_H
5487
