//===-- Instruction.cpp - Implement the Instruction class -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Instruction class for the IR library.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/Instruction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
using namespace llvm;

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         InstListType::iterator InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // When called with an iterator, there must be a block to insert into.
  BasicBlock *BB = InsertBefore->getParent();
  assert(BB && "Instruction to insert before is not in a basic block!");
  insertInto(BB, InsertBefore);
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         Instruction *InsertBefore)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, insert this instruction into a basic block...
  if (InsertBefore) {
    BasicBlock *BB = InsertBefore->getParent();
    assert(BB && "Instruction to insert before is not in a basic block!");
    insertInto(BB, InsertBefore->getIterator());
  }
}

Instruction::Instruction(Type *ty, unsigned it, Use *Ops, unsigned NumOps,
                         BasicBlock *InsertAtEnd)
    : User(ty, Value::InstructionVal + it, Ops, NumOps), Parent(nullptr) {

  // If requested, append this instruction into the basic block.
  if (InsertAtEnd)
    insertInto(InsertAtEnd, InsertAtEnd->end());
}

Instruction::~Instruction() {
  assert(!Parent && "Instruction still linked in the program!");

  // Replace any extant metadata uses of this instruction with undef to
  // preserve debug info accuracy. Some alternatives include:
  // - Treat Instruction like any other Value, and point its extant metadata
  //   uses to an empty ValueAsMetadata node. This makes extant dbg.value uses
  //   trivially dead (i.e. fair game for deletion in many passes), leading to
  //   stale dbg.values being in effect for too long.
  // - Call salvageDebugInfoOrMarkUndef. Not needed to make instruction removal
  //   correct. OTOH results in wasted work in some common cases (e.g. when all
  //   instructions in a BasicBlock are deleted).
  if (isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, UndefValue::get(getType()));

  // Explicitly remove DIAssignID metadata to clear up ID -> Instruction(s)
  // mapping in LLVMContext.
  setMetadata(LLVMContext::MD_DIAssignID, nullptr);
}

void Instruction::setParent(BasicBlock *P) {
  Parent = P;
}

const Module *Instruction::getModule() const {
  return getParent()->getModule();
}

const Function *Instruction::getFunction() const {
  return getParent()->getParent();
}

void Instruction::removeFromParent() {
  // Perform any debug-info maintenance required.
  handleMarkerRemoval();

  getParent()->getInstList().remove(getIterator());
}

void Instruction::handleMarkerRemoval() {
  if (!Parent->IsNewDbgInfoFormat || !DebugMarker)
    return;

  DebugMarker->removeMarker();
}

BasicBlock::iterator Instruction::eraseFromParent() {
  handleMarkerRemoval();
  return getParent()->getInstList().erase(getIterator());
}
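
// A note on the two removal APIs above: removeFromParent() only unlinks the
// instruction (the caller keeps ownership and may re-insert it elsewhere),
// while eraseFromParent() also deletes it and returns an iterator to the
// following instruction. An illustrative deletion loop (a sketch, not code
// from this file):
//   for (Instruction &I : llvm::make_early_inc_range(BB))
//     if (isInstructionTriviallyDead(&I))
//       I.eraseFromParent();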

void Instruction::insertBefore(Instruction *InsertPos) {
  insertBefore(InsertPos->getIterator());
}

/// Insert an unlinked instruction into a basic block immediately before the
/// specified instruction.
void Instruction::insertBefore(BasicBlock::iterator InsertPos) {
  insertBefore(*InsertPos->getParent(), InsertPos);
}

/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void Instruction::insertAfter(Instruction *InsertPos) {
  BasicBlock *DestParent = InsertPos->getParent();

  DestParent->getInstList().insertAfter(InsertPos->getIterator(), this);
}

BasicBlock::iterator Instruction::insertInto(BasicBlock *ParentBB,
                                             BasicBlock::iterator It) {
  assert(getParent() == nullptr && "Expected detached instruction");
  assert((It == ParentBB->end() || It->getParent() == ParentBB) &&
         "It not in ParentBB");
  insertBefore(*ParentBB, It);
  return getIterator();
}

extern cl::opt<bool> UseNewDbgInfoFormat;

void Instruction::insertBefore(BasicBlock &BB,
                               InstListType::iterator InsertPos) {
  assert(!DebugMarker);

  BB.getInstList().insert(InsertPos, this);

  if (!BB.IsNewDbgInfoFormat)
    return;

  // We've inserted "this": if InsertAtHead is set then it comes before any
  // DbgVariableRecords attached to InsertPos. But if it's not set, then any
  // DbgRecords should now come before "this".
  bool InsertAtHead = InsertPos.getHeadBit();
  if (!InsertAtHead) {
    DbgMarker *SrcMarker = BB.getMarker(InsertPos);
    if (SrcMarker && !SrcMarker->empty()) {
      // If this assertion fires, the calling code is about to insert a PHI
      // after debug-records, which would form a sequence like:
      //     %0 = PHI
      //     #dbg_value
      //     %1 = PHI
      // Which is de-normalised and undesired -- hence the assertion. To avoid
      // this, you must insert at that position using an iterator, and it must
      // be acquired by calling getFirstNonPHIIt / begin or similar methods on
      // the block. This will signal to this behind-the-scenes debug-info
      // maintenance code that you intend the PHI to be ahead of everything,
      // including any debug-info.
      assert(!isa<PHINode>(this) && "Inserting PHI after debug-records!");
      adoptDbgRecords(&BB, InsertPos, false);
    }
  }

  // If we're inserting a terminator, check if we need to flush out
  // TrailingDbgRecords. Inserting instructions at the end of an incomplete
  // block is handled by the code block above.
  if (isTerminator())
    getParent()->flushTerminatorDbgRecords();
}
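
// An illustrative sketch of the head-bit convention assumed above: iterators
// obtained from the block itself (e.g. BB->begin() or getFirstNonPHIIt())
// carry the head bit, so the insertion lands ahead of any DbgRecords attached
// to the position; an iterator derived from another instruction does not
// (names below are hypothetical):
//   NewPHI->insertBefore(*BB, BB->begin());               // before DbgRecords
//   NewInst->insertBefore(*BB, OtherInst->getIterator()); // after them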

/// Unlink this instruction from its current basic block and insert it into the
/// basic block that MovePos lives in, right before MovePos.
void Instruction::moveBefore(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), false);
}

void Instruction::moveBeforePreserving(Instruction *MovePos) {
  moveBeforeImpl(*MovePos->getParent(), MovePos->getIterator(), true);
}

void Instruction::moveAfter(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction to be moved to before NextIt in the instruction
  // list, but before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, false);
}

void Instruction::moveAfterPreserving(Instruction *MovePos) {
  auto NextIt = std::next(MovePos->getIterator());
  // We want this instruction and its debug range to be moved to before NextIt
  // in the instruction list, but before NextIt's debug value range.
  NextIt.setHeadBit(true);
  moveBeforeImpl(*MovePos->getParent(), NextIt, true);
}

void Instruction::moveBefore(BasicBlock &BB, InstListType::iterator I) {
  moveBeforeImpl(BB, I, false);
}

void Instruction::moveBeforePreserving(BasicBlock &BB,
                                       InstListType::iterator I) {
  moveBeforeImpl(BB, I, true);
}
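
// The "Preserving" variants move the instruction together with any DbgRecords
// attached to it; the plain variants detach those records first (see
// moveBeforeImpl below). A hoisting sketch with hypothetical names:
//   I->moveBeforePreserving(*DestBB, InsertPt);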

void Instruction::moveBeforeImpl(BasicBlock &BB, InstListType::iterator I,
                                 bool Preserve) {
  assert(I == BB.end() || I->getParent() == &BB);
  bool InsertAtHead = I.getHeadBit();

  // If we've been given the "Preserve" flag, then just move the DbgRecords with
  // the instruction, no more special handling needed.
  if (BB.IsNewDbgInfoFormat && DebugMarker && !Preserve) {
    if (I != this->getIterator() || InsertAtHead) {
      // "this" is definitely moving in the list, or it's moving ahead of its
      // attached DbgVariableRecords. Detach any existing DbgRecords.
      handleMarkerRemoval();
    }
  }

  // Move this single instruction. Use the list splice method directly, not
  // the block splicer, which will do more debug-info things.
  BB.getInstList().splice(I, getParent()->getInstList(), getIterator());

  if (BB.IsNewDbgInfoFormat && !Preserve) {
    DbgMarker *NextMarker = getParent()->getNextMarker(this);

    // If we're inserting at point I, and not in front of the DbgRecords
    // attached there, then we should absorb the DbgRecords attached to I.
    if (!InsertAtHead && NextMarker && !NextMarker->empty()) {
      adoptDbgRecords(&BB, I, false);
    }
  }

  if (isTerminator())
    getParent()->flushTerminatorDbgRecords();
}

iterator_range<DbgRecord::self_iterator> Instruction::cloneDebugInfoFrom(
    const Instruction *From, std::optional<DbgRecord::self_iterator> FromHere,
    bool InsertAtHead) {
  if (!From->DebugMarker)
    return DbgMarker::getEmptyDbgRecordRange();

  assert(getParent()->IsNewDbgInfoFormat);
  assert(getParent()->IsNewDbgInfoFormat ==
         From->getParent()->IsNewDbgInfoFormat);

  if (!DebugMarker)
    getParent()->createMarker(this);

  return DebugMarker->cloneDebugInfoFrom(From->DebugMarker, FromHere,
                                         InsertAtHead);
}

std::optional<DbgRecord::self_iterator>
Instruction::getDbgReinsertionPosition() {
  // Is there a marker on the next instruction?
  DbgMarker *NextMarker = getParent()->getNextMarker(this);
  if (!NextMarker)
    return std::nullopt;

  // Are there any DbgRecords in the next marker?
  if (NextMarker->StoredDbgRecords.empty())
    return std::nullopt;

  return NextMarker->StoredDbgRecords.begin();
}

bool Instruction::hasDbgRecords() const { return !getDbgRecordRange().empty(); }

void Instruction::adoptDbgRecords(BasicBlock *BB, BasicBlock::iterator It,
                                  bool InsertAtHead) {
  DbgMarker *SrcMarker = BB->getMarker(It);
  auto ReleaseTrailingDbgRecords = [BB, It, SrcMarker]() {
    if (BB->end() == It) {
      SrcMarker->eraseFromParent();
      BB->deleteTrailingDbgRecords();
    }
  };

  if (!SrcMarker || SrcMarker->StoredDbgRecords.empty()) {
    ReleaseTrailingDbgRecords();
    return;
  }

  // If we have DbgMarkers attached to this instruction, we have to honour the
  // ordering of DbgRecords between this and the other marker. Fall back to just
  // absorbing from the source.
  if (DebugMarker || It == BB->end()) {
    // Ensure we _do_ have a marker.
    getParent()->createMarker(this);
    DebugMarker->absorbDebugValues(*SrcMarker, InsertAtHead);

    // Having transferred everything out of SrcMarker, we _could_ clean it up
    // and free the marker now. However, that's a lot of heap-accounting for a
    // small amount of memory with a good chance of re-use. Leave it for the
    // moment. It will be released when the Instruction is freed in the worst
    // case.
    // However: if we transferred from a trailing marker off the end of the
    // block, it's important to not leave the empty marker trailing. It will
    // give a misleading impression that some debug records have been left
    // trailing.
    ReleaseTrailingDbgRecords();
  } else {
    // Optimisation: we're transferring all the DbgRecords from the source
    // marker onto this empty location: just adopt the other instruction's
    // marker.
    DebugMarker = SrcMarker;
    DebugMarker->MarkedInstr = this;
    It->DebugMarker = nullptr;
  }
}

void Instruction::dropDbgRecords() {
  if (DebugMarker)
    DebugMarker->dropDbgRecords();
}

void Instruction::dropOneDbgRecord(DbgRecord *DVR) {
  DebugMarker->dropOneDbgRecord(DVR);
}

bool Instruction::comesBefore(const Instruction *Other) const {
  assert(Parent && Other->Parent &&
         "instructions without BB parents have no order");
  assert(Parent == Other->Parent && "cross-BB instruction order comparison");
  if (!Parent->isInstrOrderValid())
    Parent->renumberInstructions();
  return Order < Other->Order;
}
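
// comesBefore() is amortized constant time: each instruction caches its
// position in the Order field, and the numbering is only recomputed (one
// linear pass over the block) if the block was mutated since the last
// renumbering.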

std::optional<BasicBlock::iterator> Instruction::getInsertionPointAfterDef() {
  assert(!getType()->isVoidTy() && "Instruction must define result");
  BasicBlock *InsertBB;
  BasicBlock::iterator InsertPt;
  if (auto *PN = dyn_cast<PHINode>(this)) {
    InsertBB = PN->getParent();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (auto *II = dyn_cast<InvokeInst>(this)) {
    InsertBB = II->getNormalDest();
    InsertPt = InsertBB->getFirstInsertionPt();
  } else if (isa<CallBrInst>(this)) {
    // Def is available in multiple successors, there's no single dominating
    // insertion point.
    return std::nullopt;
  } else {
    assert(!isTerminator() && "Only invoke/callbr terminators return value");
    InsertBB = getParent();
    InsertPt = std::next(getIterator());
    // Any instruction inserted immediately after "this" will come before any
    // debug-info records take effect -- thus, set the head bit indicating that
    // to debug-info-transfer code.
    InsertPt.setHeadBit(true);
  }

  // catchswitch blocks don't have any legal insertion point (because they
  // are both an exception pad and a terminator).
  if (InsertPt == InsertBB->end())
    return std::nullopt;
  return InsertPt;
}
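
// A typical use of getInsertionPointAfterDef() (a sketch, not code from this
// file): placing a new instruction so that it is dominated by an existing
// definition Def:
//   if (auto InsertPt = Def->getInsertionPointAfterDef())
//     NewInst->insertBefore(*InsertPt);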

bool Instruction::isOnlyUserOfAnyOperand() {
  return any_of(operands(), [](Value *V) { return V->hasOneUser(); });
}

void Instruction::setHasNoUnsignedWrap(bool b) {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoUnsignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoUnsignedWrap(b);
}

void Instruction::setHasNoSignedWrap(bool b) {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    Inst->setHasNoSignedWrap(b);
  else
    cast<TruncInst>(this)->setHasNoSignedWrap(b);
}

void Instruction::setIsExact(bool b) {
  cast<PossiblyExactOperator>(this)->setIsExact(b);
}

void Instruction::setNonNeg(bool b) {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
  SubclassOptionalData = (SubclassOptionalData & ~PossiblyNonNegInst::NonNeg) |
                         (b * PossiblyNonNegInst::NonNeg);
}

bool Instruction::hasNoUnsignedWrap() const {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoUnsignedWrap();

  return cast<TruncInst>(this)->hasNoUnsignedWrap();
}

bool Instruction::hasNoSignedWrap() const {
  if (auto *Inst = dyn_cast<OverflowingBinaryOperator>(this))
    return Inst->hasNoSignedWrap();

  return cast<TruncInst>(this)->hasNoSignedWrap();
}

bool Instruction::hasNonNeg() const {
  assert(isa<PossiblyNonNegInst>(this) && "Must be zext/uitofp");
  return (SubclassOptionalData & PossiblyNonNegInst::NonNeg) != 0;
}

bool Instruction::hasPoisonGeneratingFlags() const {
  return cast<Operator>(this)->hasPoisonGeneratingFlags();
}

void Instruction::dropPoisonGeneratingFlags() {
  switch (getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
    cast<OverflowingBinaryOperator>(this)->setHasNoUnsignedWrap(false);
    cast<OverflowingBinaryOperator>(this)->setHasNoSignedWrap(false);
    break;

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::AShr:
  case Instruction::LShr:
    cast<PossiblyExactOperator>(this)->setIsExact(false);
    break;

  case Instruction::Or:
    cast<PossiblyDisjointInst>(this)->setIsDisjoint(false);
    break;

  case Instruction::GetElementPtr:
    cast<GetElementPtrInst>(this)->setIsInBounds(false);
    break;

  case Instruction::UIToFP:
  case Instruction::ZExt:
    setNonNeg(false);
    break;

  case Instruction::Trunc:
    cast<TruncInst>(this)->setHasNoUnsignedWrap(false);
    cast<TruncInst>(this)->setHasNoSignedWrap(false);
    break;
  }

  if (isa<FPMathOperator>(this)) {
    setHasNoNaNs(false);
    setHasNoInfs(false);
  }

  assert(!hasPoisonGeneratingFlags() && "must be kept in sync");
}
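
// For example, after dropPoisonGeneratingFlags() an "add nuw nsw i32 %a, %b"
// becomes a plain "add i32 %a, %b". Dropping the flags is required when an
// instruction is hoisted or speculated into a context where its operands may
// take values that would have violated them.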

bool Instruction::hasPoisonGeneratingMetadata() const {
  return hasMetadata(LLVMContext::MD_range) ||
         hasMetadata(LLVMContext::MD_nonnull) ||
         hasMetadata(LLVMContext::MD_align);
}

void Instruction::dropPoisonGeneratingMetadata() {
  eraseMetadata(LLVMContext::MD_range);
  eraseMetadata(LLVMContext::MD_nonnull);
  eraseMetadata(LLVMContext::MD_align);
}

bool Instruction::hasPoisonGeneratingReturnAttributes() const {
  if (const auto *CB = dyn_cast<CallBase>(this)) {
    AttributeSet RetAttrs = CB->getAttributes().getRetAttrs();
    return RetAttrs.hasAttribute(Attribute::Range) ||
           RetAttrs.hasAttribute(Attribute::Alignment) ||
           RetAttrs.hasAttribute(Attribute::NonNull);
  }
  return false;
}

void Instruction::dropPoisonGeneratingReturnAttributes() {
  if (auto *CB = dyn_cast<CallBase>(this)) {
    AttributeMask AM;
    AM.addAttribute(Attribute::Range);
    AM.addAttribute(Attribute::Alignment);
    AM.addAttribute(Attribute::NonNull);
    CB->removeRetAttrs(AM);
  }
  assert(!hasPoisonGeneratingReturnAttributes() && "must be kept in sync");
}

void Instruction::dropUBImplyingAttrsAndUnknownMetadata(
    ArrayRef<unsigned> KnownIDs) {
  dropUnknownNonDebugMetadata(KnownIDs);
  auto *CB = dyn_cast<CallBase>(this);
  if (!CB)
    return;
  // For call instructions, we also need to drop parameter and return
  // attributes that can cause UB if the call is moved to a location where the
  // attribute is not valid.
  AttributeList AL = CB->getAttributes();
  if (AL.isEmpty())
    return;
  AttributeMask UBImplyingAttributes =
      AttributeFuncs::getUBImplyingAttributes();
  for (unsigned ArgNo = 0; ArgNo < CB->arg_size(); ArgNo++)
    CB->removeParamAttrs(ArgNo, UBImplyingAttributes);
  CB->removeRetAttrs(UBImplyingAttributes);
}

void Instruction::dropUBImplyingAttrsAndMetadata() {
  // !annotation metadata does not impact semantics.
  // !range, !nonnull and !align produce poison, so they are safe to speculate.
  // !noundef and various AA metadata must be dropped, as they generally
  // produce immediate undefined behavior.
  unsigned KnownIDs[] = {LLVMContext::MD_annotation, LLVMContext::MD_range,
                         LLVMContext::MD_nonnull, LLVMContext::MD_align};
  dropUBImplyingAttrsAndUnknownMetadata(KnownIDs);
}
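
// A sketch of the intended use (hypothetical names): before speculatively
// executing a call, e.g. hoisting it above a branch, strip the attributes and
// metadata whose violation is immediate UB; the poison-generating ones listed
// above may be kept:
//   Call->dropUBImplyingAttrsAndMetadata();
//   Call->moveBefore(Branch);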

bool Instruction::isExact() const {
  return cast<PossiblyExactOperator>(this)->isExact();
}

void Instruction::setFast(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFast(B);
}

void Instruction::setHasAllowReassoc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReassoc(B);
}

void Instruction::setHasNoNaNs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoNaNs(B);
}

void Instruction::setHasNoInfs(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoInfs(B);
}

void Instruction::setHasNoSignedZeros(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasNoSignedZeros(B);
}

void Instruction::setHasAllowReciprocal(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowReciprocal(B);
}

void Instruction::setHasAllowContract(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasAllowContract(B);
}

void Instruction::setHasApproxFunc(bool B) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setHasApproxFunc(B);
}

void Instruction::setFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "setting fast-math flag on invalid op");
  cast<FPMathOperator>(this)->setFastMathFlags(FMF);
}

void Instruction::copyFastMathFlags(FastMathFlags FMF) {
  assert(isa<FPMathOperator>(this) && "copying fast-math flag on invalid op");
  cast<FPMathOperator>(this)->copyFastMathFlags(FMF);
}

bool Instruction::isFast() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->isFast();
}

bool Instruction::hasAllowReassoc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReassoc();
}

bool Instruction::hasNoNaNs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoNaNs();
}

bool Instruction::hasNoInfs() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoInfs();
}

bool Instruction::hasNoSignedZeros() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasNoSignedZeros();
}

bool Instruction::hasAllowReciprocal() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowReciprocal();
}

bool Instruction::hasAllowContract() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasAllowContract();
}

bool Instruction::hasApproxFunc() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->hasApproxFunc();
}

FastMathFlags Instruction::getFastMathFlags() const {
  assert(isa<FPMathOperator>(this) && "getting fast-math flag on invalid op");
  return cast<FPMathOperator>(this)->getFastMathFlags();
}

void Instruction::copyFastMathFlags(const Instruction *I) {
  copyFastMathFlags(I->getFastMathFlags());
}

void Instruction::copyIRFlags(const Value *V, bool IncludeWrapFlags) {
  // Copy the wrapping flags.
  if (IncludeWrapFlags && isa<OverflowingBinaryOperator>(this)) {
    if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
      setHasNoSignedWrap(OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(TI->hasNoUnsignedWrap());
    }
  }

  // Copy the exact flag.
  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(SrcPD->isDisjoint());

  // Copy the fast-math flags.
  if (auto *FP = dyn_cast<FPMathOperator>(V))
    if (isa<FPMathOperator>(this))
      copyFastMathFlags(FP->getFastMathFlags());

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() || DestGEP->isInBounds());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(NNI->hasNonNeg());
}

void Instruction::andIRFlags(const Value *V) {
  if (auto *OB = dyn_cast<OverflowingBinaryOperator>(V)) {
    if (isa<OverflowingBinaryOperator>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && OB->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && OB->hasNoUnsignedWrap());
    }
  }

  if (auto *TI = dyn_cast<TruncInst>(V)) {
    if (isa<TruncInst>(this)) {
      setHasNoSignedWrap(hasNoSignedWrap() && TI->hasNoSignedWrap());
      setHasNoUnsignedWrap(hasNoUnsignedWrap() && TI->hasNoUnsignedWrap());
    }
  }

  if (auto *PE = dyn_cast<PossiblyExactOperator>(V))
    if (isa<PossiblyExactOperator>(this))
      setIsExact(isExact() && PE->isExact());

  if (auto *SrcPD = dyn_cast<PossiblyDisjointInst>(V))
    if (auto *DestPD = dyn_cast<PossiblyDisjointInst>(this))
      DestPD->setIsDisjoint(DestPD->isDisjoint() && SrcPD->isDisjoint());

  if (auto *FP = dyn_cast<FPMathOperator>(V)) {
    if (isa<FPMathOperator>(this)) {
      FastMathFlags FM = getFastMathFlags();
      FM &= FP->getFastMathFlags();
      copyFastMathFlags(FM);
    }
  }

  if (auto *SrcGEP = dyn_cast<GetElementPtrInst>(V))
    if (auto *DestGEP = dyn_cast<GetElementPtrInst>(this))
      DestGEP->setIsInBounds(SrcGEP->isInBounds() && DestGEP->isInBounds());

  if (auto *NNI = dyn_cast<PossiblyNonNegInst>(V))
    if (isa<PossiblyNonNegInst>(this))
      setNonNeg(hasNonNeg() && NNI->hasNonNeg());
}
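
// andIRFlags() intersects flags, the conservative choice when merging two
// instructions (e.g. during CSE or sinking): keeping "nuw" on the survivor is
// only sound if both originals carried it. A sketch with hypothetical names:
//   KeptInst->andIRFlags(RemovedInst);
//   RemovedInst->replaceAllUsesWith(KeptInst);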

const char *Instruction::getOpcodeName(unsigned OpCode) {
  switch (OpCode) {
  // Terminators
  case Ret:    return "ret";
  case Br:     return "br";
  case Switch: return "switch";
  case IndirectBr: return "indirectbr";
  case Invoke: return "invoke";
  case Resume: return "resume";
  case Unreachable: return "unreachable";
  case CleanupRet: return "cleanupret";
  case CatchRet: return "catchret";
  case CatchPad: return "catchpad";
  case CatchSwitch: return "catchswitch";
  case CallBr: return "callbr";

  // Standard unary operators...
  case FNeg: return "fneg";

  // Standard binary operators...
  case Add: return "add";
  case FAdd: return "fadd";
  case Sub: return "sub";
  case FSub: return "fsub";
  case Mul: return "mul";
  case FMul: return "fmul";
  case UDiv: return "udiv";
  case SDiv: return "sdiv";
  case FDiv: return "fdiv";
  case URem: return "urem";
  case SRem: return "srem";
  case FRem: return "frem";

  // Logical operators...
  case And: return "and";
  case Or : return "or";
  case Xor: return "xor";

  // Memory instructions...
  case Alloca:        return "alloca";
  case Load:          return "load";
  case Store:         return "store";
  case AtomicCmpXchg: return "cmpxchg";
  case AtomicRMW:     return "atomicrmw";
  case Fence:         return "fence";
  case GetElementPtr: return "getelementptr";

  // Convert instructions...
  case Trunc:         return "trunc";
  case ZExt:          return "zext";
  case SExt:          return "sext";
  case FPTrunc:       return "fptrunc";
  case FPExt:         return "fpext";
  case FPToUI:        return "fptoui";
  case FPToSI:        return "fptosi";
  case UIToFP:        return "uitofp";
  case SIToFP:        return "sitofp";
  case IntToPtr:      return "inttoptr";
  case PtrToInt:      return "ptrtoint";
  case BitCast:       return "bitcast";
  case AddrSpaceCast: return "addrspacecast";

  // Other instructions...
  case ICmp:           return "icmp";
  case FCmp:           return "fcmp";
  case PHI:            return "phi";
  case Select:         return "select";
  case Call:           return "call";
  case Shl:            return "shl";
  case LShr:           return "lshr";
  case AShr:           return "ashr";
  case VAArg:          return "va_arg";
  case ExtractElement: return "extractelement";
  case InsertElement:  return "insertelement";
  case ShuffleVector:  return "shufflevector";
  case ExtractValue:   return "extractvalue";
  case InsertValue:    return "insertvalue";
  case LandingPad:     return "landingpad";
  case CleanupPad:     return "cleanuppad";
  case Freeze:         return "freeze";

  default: return "<Invalid operator> ";
  }
}

/// This must be kept in sync with FunctionComparator::cmpOperations in
/// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::hasSameSpecialState(const Instruction *I2,
                                      bool IgnoreAlignment) const {
  auto I1 = this;
  assert(I1->getOpcode() == I2->getOpcode() &&
         "Can not compare special state of different instructions");

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(I1))
    return AI->getAllocatedType() == cast<AllocaInst>(I2)->getAllocatedType() &&
           (AI->getAlign() == cast<AllocaInst>(I2)->getAlign() ||
            IgnoreAlignment);
  if (const LoadInst *LI = dyn_cast<LoadInst>(I1))
    return LI->isVolatile() == cast<LoadInst>(I2)->isVolatile() &&
           (LI->getAlign() == cast<LoadInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
           LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
    return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
           (SI->getAlign() == cast<StoreInst>(I2)->getAlign() ||
            IgnoreAlignment) &&
           SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
           SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
  if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
    return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
  if (const CallInst *CI = dyn_cast<CallInst>(I1))
    return CI->isTailCall() == cast<CallInst>(I2)->isTailCall() &&
           CI->getCallingConv() == cast<CallInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallInst>(I2));
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(I1))
    return CI->getCallingConv() == cast<InvokeInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<InvokeInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<InvokeInst>(I2));
  if (const CallBrInst *CI = dyn_cast<CallBrInst>(I1))
    return CI->getCallingConv() == cast<CallBrInst>(I2)->getCallingConv() &&
           CI->getAttributes() == cast<CallBrInst>(I2)->getAttributes() &&
           CI->hasIdenticalOperandBundleSchema(*cast<CallBrInst>(I2));
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(I1))
    return IVI->getIndices() == cast<InsertValueInst>(I2)->getIndices();
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I1))
    return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
  if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
    return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
           FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
    return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
           CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
           CXI->getSuccessOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
           CXI->getFailureOrdering() ==
               cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
           CXI->getSyncScopeID() ==
               cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
    return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
           RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
           RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
           RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
  if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
    return SVI->getShuffleMask() ==
           cast<ShuffleVectorInst>(I2)->getShuffleMask();
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I1))
    return GEP->getSourceElementType() ==
           cast<GetElementPtrInst>(I2)->getSourceElementType();

  return true;
}

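// isIdenticalTo() additionally compares the optional flags stored in
// SubclassOptionalData (nuw/nsw/exact/inbounds/fast-math), whereas
// isIdenticalToWhenDefined() ignores them and so matches instructions that
// compute the same value whenever both are well-defined.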
bool Instruction::isIdenticalTo(const Instruction *I) const {
  return isIdenticalToWhenDefined(I) &&
         SubclassOptionalData == I->SubclassOptionalData;
}

bool Instruction::isIdenticalToWhenDefined(const Instruction *I) const {
  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      getType() != I->getType())
    return false;

  // If both instructions have no operands, they are identical.
  if (getNumOperands() == 0 && I->getNumOperands() == 0)
    return this->hasSameSpecialState(I);

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same.
  if (!std::equal(op_begin(), op_end(), I->op_begin()))
    return false;

  // WARNING: this logic must be kept in sync with EliminateDuplicatePHINodes()!
  if (const PHINode *thisPHI = dyn_cast<PHINode>(this)) {
    const PHINode *otherPHI = cast<PHINode>(I);
    return std::equal(thisPHI->block_begin(), thisPHI->block_end(),
                      otherPHI->block_begin());
  }

  return this->hasSameSpecialState(I);
}

// Keep this in sync with FunctionComparator::cmpOperations in
// lib/Transforms/IPO/MergeFunctions.cpp.
bool Instruction::isSameOperationAs(const Instruction *I,
                                    unsigned flags) const {
  bool IgnoreAlignment = flags & CompareIgnoringAlignment;
  bool UseScalarTypes = flags & CompareUsingScalarTypes;

  if (getOpcode() != I->getOpcode() ||
      getNumOperands() != I->getNumOperands() ||
      (UseScalarTypes ?
       getType()->getScalarType() != I->getType()->getScalarType() :
       getType() != I->getType()))
    return false;

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (UseScalarTypes ?
        getOperand(i)->getType()->getScalarType() !=
        I->getOperand(i)->getType()->getScalarType() :
        getOperand(i)->getType() != I->getOperand(i)->getType())
      return false;

  return this->hasSameSpecialState(I, IgnoreAlignment);
}

bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
  for (const Use &U : uses()) {
    // PHI nodes use values in their corresponding predecessor block. For other
    // instructions, just check to see whether the parent of the use matches up.
    const Instruction *I = cast<Instruction>(U.getUser());
    const PHINode *PN = dyn_cast<PHINode>(I);
    if (!PN) {
      if (I->getParent() != BB)
        return true;
      continue;
    }

    if (PN->getIncomingBlock(U) != BB)
      return true;
  }
  return false;
}

bool Instruction::mayReadFromMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::VAArg:
  case Instruction::Load:
  case Instruction::Fence: // FIXME: refine definition of mayReadFromMemory
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyWritesMemory();
  case Instruction::Store:
    return !cast<StoreInst>(this)->isUnordered();
  }
}

bool Instruction::mayWriteToMemory() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::Fence: // FIXME: refine definition of mayWriteToMemory
  case Instruction::Store:
  case Instruction::VAArg:
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
    return true;
  case Instruction::Call:
  case Instruction::Invoke:
  case Instruction::CallBr:
    return !cast<CallBase>(this)->onlyReadsMemory();
  case Instruction::Load:
    return !cast<LoadInst>(this)->isUnordered();
  }
}

bool Instruction::isAtomic() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Fence:
    return true;
  case Instruction::Load:
    return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  case Instruction::Store:
    return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
  }
}

bool Instruction::hasAtomicLoad() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Load:
    return true;
  }
}

bool Instruction::hasAtomicStore() const {
  assert(isAtomic());
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicCmpXchg:
  case Instruction::AtomicRMW:
  case Instruction::Store:
    return true;
  }
}

bool Instruction::isVolatile() const {
  switch (getOpcode()) {
  default:
    return false;
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(this)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(this)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(this)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->isVolatile();
  case Instruction::Call:
  case Instruction::Invoke:
    // There are a very limited number of intrinsics with volatile flags.
    if (auto *II = dyn_cast<IntrinsicInst>(this)) {
      if (auto *MI = dyn_cast<MemIntrinsic>(II))
        return MI->isVolatile();
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::matrix_column_major_load:
        return cast<ConstantInt>(II->getArgOperand(2))->isOne();
      case Intrinsic::matrix_column_major_store:
        return cast<ConstantInt>(II->getArgOperand(3))->isOne();
      }
    }
    return false;
  }
}

Type *Instruction::getAccessType() const {
  switch (getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(this)->getValueOperand()->getType();
  case Instruction::Load:
  case Instruction::AtomicRMW:
    return getType();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(this)->getNewValOperand()->getType();
  case Instruction::Call:
  case Instruction::Invoke:
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(this)) {
      switch (II->getIntrinsicID()) {
      case Intrinsic::masked_load:
      case Intrinsic::masked_gather:
      case Intrinsic::masked_expandload:
      case Intrinsic::vp_load:
      case Intrinsic::vp_gather:
      case Intrinsic::experimental_vp_strided_load:
        return II->getType();
      case Intrinsic::masked_store:
      case Intrinsic::masked_scatter:
      case Intrinsic::masked_compressstore:
      case Intrinsic::vp_store:
      case Intrinsic::vp_scatter:
      case Intrinsic::experimental_vp_strided_store:
        return II->getOperand(0)->getType();
      default:
        break;
      }
    }
  }

  return nullptr;
}
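
// For example, getAccessType() on "store <4 x i32> %v, ptr %p" yields
// <4 x i32>, and on "cmpxchg ptr %p, i32 %old, i32 %new seq_cst seq_cst" it
// yields i32; for instructions that don't access memory it returns nullptr.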

static bool canUnwindPastLandingPad(const LandingPadInst *LP,
                                    bool IncludePhaseOneUnwind) {
  // Because phase one unwinding skips cleanup landingpads, we effectively
  // unwind past this frame, and callers need to have valid unwind info.
  if (LP->isCleanup())
    return IncludePhaseOneUnwind;

  for (unsigned I = 0; I < LP->getNumClauses(); ++I) {
    Constant *Clause = LP->getClause(I);
    // catch ptr null catches all exceptions.
    if (LP->isCatch(I) && isa<ConstantPointerNull>(Clause))
      return false;
    // filter [0 x ptr] catches all exceptions.
    if (LP->isFilter(I) && Clause->getType()->getArrayNumElements() == 0)
      return false;
  }

  // May catch only some subset of exceptions, in which case other exceptions
  // will continue unwinding.
  return true;
}

bool Instruction::mayThrow(bool IncludePhaseOneUnwind) const {
  switch (getOpcode()) {
  case Instruction::Call:
    return !cast<CallInst>(this)->doesNotThrow();
  case Instruction::CleanupRet:
    return cast<CleanupReturnInst>(this)->unwindsToCaller();
  case Instruction::CatchSwitch:
    return cast<CatchSwitchInst>(this)->unwindsToCaller();
  case Instruction::Resume:
    return true;
  case Instruction::Invoke: {
    // Landingpads themselves don't unwind -- however, an invoke of a skipped
    // landingpad may continue unwinding.
    BasicBlock *UnwindDest = cast<InvokeInst>(this)->getUnwindDest();
    Instruction *Pad = UnwindDest->getFirstNonPHI();
    if (auto *LP = dyn_cast<LandingPadInst>(Pad))
      return canUnwindPastLandingPad(LP, IncludePhaseOneUnwind);
    return false;
  }
  case Instruction::CleanupPad:
    // Treat the same as cleanup landingpad.
    return IncludePhaseOneUnwind;
  default:
    return false;
  }
}

bool Instruction::mayHaveSideEffects() const {
  return mayWriteToMemory() || mayThrow() || !willReturn();
}

bool Instruction::isSafeToRemove() const {
  return (!isa<CallInst>(this) || !this->mayHaveSideEffects()) &&
         !this->isTerminator() && !this->isEHPad();
}

bool Instruction::willReturn() const {
  // Volatile store isn't guaranteed to return; see LangRef.
  if (auto *SI = dyn_cast<StoreInst>(this))
    return !SI->isVolatile();

  if (const auto *CB = dyn_cast<CallBase>(this))
    return CB->hasFnAttr(Attribute::WillReturn);
  return true;
}

bool Instruction::isLifetimeStartOrEnd() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::lifetime_start || ID == Intrinsic::lifetime_end;
}

bool Instruction::isLaunderOrStripInvariantGroup() const {
  auto *II = dyn_cast<IntrinsicInst>(this);
  if (!II)
    return false;
  Intrinsic::ID ID = II->getIntrinsicID();
  return ID == Intrinsic::launder_invariant_group ||
         ID == Intrinsic::strip_invariant_group;
}

bool Instruction::isDebugOrPseudoInst() const {
  return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this);
}

const Instruction *
Instruction::getNextNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getNextNode(); I; I = I->getNextNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const Instruction *
Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const {
  for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode())
    if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I)))
      return I;
  return nullptr;
}

const DebugLoc &Instruction::getStableDebugLoc() const {
  if (isa<DbgInfoIntrinsic>(this))
    if (const Instruction *Next = getNextNonDebugInstruction())
      return Next->getDebugLoc();
  return getDebugLoc();
}

bool Instruction::isAssociative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isAssociative();
  unsigned Opcode = getOpcode();
  if (isAssociative(Opcode))
    return true;

  switch (Opcode) {
  case FMul:
  case FAdd:
    return cast<FPMathOperator>(this)->hasAllowReassoc() &&
           cast<FPMathOperator>(this)->hasNoSignedZeros();
  default:
    return false;
  }
}
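
// Note that plain fadd/fmul are not associative: in floating point,
// (a + b) + c can differ from a + (b + c) due to rounding, and signed zeros
// can change results. They are therefore only treated as associative when
// both the 'reassoc' and 'nsz' fast-math flags are present.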

bool Instruction::isCommutative() const {
  if (auto *II = dyn_cast<IntrinsicInst>(this))
    return II->isCommutative();
  // TODO: Should allow icmp/fcmp?
  return isCommutative(getOpcode());
}

unsigned Instruction::getNumSuccessors() const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getNumSuccessors();
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

BasicBlock *Instruction::getSuccessor(unsigned idx) const {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<const CLASS *>(this)->getSuccessor(idx);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::setSuccessor(unsigned idx, BasicBlock *B) {
  switch (getOpcode()) {
#define HANDLE_TERM_INST(N, OPC, CLASS)                                        \
  case Instruction::OPC:                                                       \
    return static_cast<CLASS *>(this)->setSuccessor(idx, B);
#include "llvm/IR/Instruction.def"
  default:
    break;
  }
  llvm_unreachable("not a terminator");
}

void Instruction::replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB) {
  for (unsigned Idx = 0, NumSuccessors = Instruction::getNumSuccessors();
       Idx != NumSuccessors; ++Idx)
    if (getSuccessor(Idx) == OldBB)
      setSuccessor(Idx, NewBB);
}

Instruction *Instruction::cloneImpl() const {
  llvm_unreachable("Subclass of Instruction failed to implement cloneImpl");
}

void Instruction::swapProfMetadata() {
  MDNode *ProfileData = getBranchWeightMDNode(*this);
  if (!ProfileData || ProfileData->getNumOperands() != 3)
    return;

  // The first operand is the name; fetch the two branch weights in reverse
  // order and build a new node.
  Metadata *Ops[] = {ProfileData->getOperand(0), ProfileData->getOperand(2),
                     ProfileData->getOperand(1)};
  setMetadata(LLVMContext::MD_prof,
              MDNode::get(ProfileData->getContext(), Ops));
}
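
// For example, a conditional branch annotated with
//   !prof !{!"branch_weights", i32 70, i32 30}
// carries !{!"branch_weights", i32 30, i32 70} after swapProfMetadata();
// callers such as BranchInst::swapSuccessors() use this to keep each weight
// attached to the correct successor.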

void Instruction::copyMetadata(const Instruction &SrcInst,
                               ArrayRef<unsigned> WL) {
  if (!SrcInst.hasMetadata())
    return;

  SmallDenseSet<unsigned, 4> WLS(WL.begin(), WL.end());

  // Otherwise, enumerate and copy over metadata from the old instruction to the
  // new one.
  SmallVector<std::pair<unsigned, MDNode *>, 4> TheMDs;
  SrcInst.getAllMetadataOtherThanDebugLoc(TheMDs);
  for (const auto &MD : TheMDs) {
    if (WL.empty() || WLS.count(MD.first))
      setMetadata(MD.first, MD.second);
  }
  if (WL.empty() || WLS.count(LLVMContext::MD_dbg))
    setDebugLoc(SrcInst.getDebugLoc());
}

Instruction *Instruction::clone() const {
  Instruction *New = nullptr;
  switch (getOpcode()) {
  default:
    llvm_unreachable("Unhandled Opcode.");
#define HANDLE_INST(num, opc, clas)                                            \
  case Instruction::opc:                                                       \
    New = cast<clas>(this)->cloneImpl();                                       \
    break;
#include "llvm/IR/Instruction.def"
#undef HANDLE_INST
  }

  New->SubclassOptionalData = SubclassOptionalData;
  New->copyMetadata(*this);
  return New;
}
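
// The returned clone is detached: it preserves the operands, flags, and
// metadata of the original, but has no parent block and no name, so callers
// must insert it (e.g. via insertBefore) and name it as needed.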